
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorMove.java

rev 56282 : [mq]: graal

Old version (before the change):
   1 /*
   2  * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.lir.amd64.vector;
  26 
  27 import static jdk.vm.ci.code.ValueUtil.asRegister;
  28 import static jdk.vm.ci.code.ValueUtil.isRegister;
  29 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVD;
  31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVDQU;
  32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVQ;
  33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVSD;
  34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVSS;
  35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVUPD;
  36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVUPS;
  37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD;
  38 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  39 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  40 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  41 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  42 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  43 
  44 import org.graalvm.compiler.asm.amd64.AMD64Address;
  45 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp;
  46 import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
  47 import org.graalvm.compiler.asm.amd64.AVXKind;
  48 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
  49 import org.graalvm.compiler.debug.GraalError;
  50 import org.graalvm.compiler.lir.LIRFrameState;
  51 import org.graalvm.compiler.lir.LIRInstructionClass;


 249     public static class VectorStoreOp extends VectorMemOp {
 250         public static final LIRInstructionClass<VectorStoreOp> TYPE = LIRInstructionClass.create(VectorStoreOp.class);
 251 
 252         @Use({REG}) protected AllocatableValue input;
 253 
 254         public VectorStoreOp(AVXSize size, VexMoveOp op, AMD64AddressValue address, AllocatableValue input, LIRFrameState state) {
 255             super(TYPE, size, op, address, state);
 256             this.input = input;
 257         }
 258 
 259         @Override
 260         public void emitMemAccess(AMD64MacroAssembler masm) {
 261             op.emit(masm, size, address.toAddress(), asRegister(input));
 262         }
 263     }
 264 
 265     @Opcode("SAVE_REGISTER")
 266     public static class SaveRegistersOp extends AMD64SaveRegistersOp {
 267         public static final LIRInstructionClass<SaveRegistersOp> TYPE = LIRInstructionClass.create(SaveRegistersOp.class);
 268 
 269         public SaveRegistersOp(Register[] savedRegisters, AllocatableValue[] slots, boolean supportsRemove) {
 270             super(TYPE, savedRegisters, slots, supportsRemove);
 271         }
 272 
 273         @Override
 274         protected void saveRegister(CompilationResultBuilder crb, AMD64MacroAssembler masm, StackSlot result, Register register) {
 275             AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
 276             if (kind.isXMM()) {
 277                 VexMoveOp op;
 278                 if (kind.getVectorLength() > 1) {
 279                     op = getVectorMoveOp(kind.getScalar());
 280                 } else {
 281                     op = getScalarMoveOp(kind);
 282                 }
 283 
 284                 AMD64Address addr = (AMD64Address) crb.asAddress(result);
 285                 op.emit(masm, AVXKind.getRegisterSize(kind), addr, register);
 286             } else {
 287                 super.saveRegister(crb, masm, result, register);
 288             }
 289         }
 290     }
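
Taken together, VectorStoreOp and SaveRegistersOp funnel every XMM save through
one of the VEX move ops imported above. A rough standalone model of the selection
in saveRegister (MoveOp and the boolean/int parameters are placeholders for the
AMD64Kind/VexMoveOp queries in the real code, not Graal or JVMCI API):

    final class SaveOpModel {
        enum MoveOp { VMOVSS, VMOVSD, VMOVUPS, VMOVUPD, VMOVDQU }

        static MoveOp selectSaveOp(boolean isXmm, int vectorLength, boolean isSingle, boolean isDouble) {
            if (!isXmm) {
                // Non-XMM saves take the superclass (general-purpose) path.
                throw new IllegalArgumentException("not an XMM kind");
            }
            if (vectorLength > 1) {
                // Vector values are moved by their element (scalar) kind.
                return isSingle ? MoveOp.VMOVUPS : isDouble ? MoveOp.VMOVUPD : MoveOp.VMOVDQU;
            }
            // Single-element XMM values only exist for float and double;
            // anything else is a GraalError in the real code.
            if (isSingle) {
                return MoveOp.VMOVSS;
            }
            if (isDouble) {
                return MoveOp.VMOVSD;
            }
            throw new IllegalStateException("unexpected scalar XMM kind");
        }
    }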


 317     }
 318 
 319     private static VexMoveOp getScalarMoveOp(AMD64Kind kind) {
 320         switch (kind) {
 321             case SINGLE:
 322                 return VMOVSS;
 323             case DOUBLE:
 324                 return VMOVSD;
 325             default:
 326                 throw GraalError.shouldNotReachHere();
 327         }
 328     }
 329 
 330     private static VexMoveOp getVectorMoveOp(AMD64Kind kind) {
 331         switch (kind) {
 332             case SINGLE:
 333                 return VMOVUPS;
 334             case DOUBLE:
 335                 return VMOVUPD;
 336             default:
 337                 return VMOVDQU;
 338         }
 339     }
 340 
 341     private static VexMoveOp getVectorMemMoveOp(AMD64Kind kind) {
 342         switch (AVXKind.getDataSize(kind)) {
 343             case DWORD:
 344                 return VMOVD;
 345             case QWORD:
 346                 return VMOVQ;
 347             default:
 348                 return getVectorMoveOp(kind.getScalar());
 349         }
 350     }
 351 
 352     private static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, AllocatableValue result, Value input) {
 353         VexMoveOp op;
 354         AVXSize size;
 355         AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
 356         if (kind.getVectorLength() > 1) {
 357             size = AVXKind.getRegisterSize(kind);


New version (after the change):
   1 /*
   2  * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.lir.amd64.vector;
  26 
  27 import static jdk.vm.ci.code.ValueUtil.asRegister;
  28 import static jdk.vm.ci.code.ValueUtil.isRegister;
  29 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVD;
  31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVDQU32;
  32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVQ;
  33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVSD;
  34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVSS;
  35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVUPD;
  36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp.VMOVUPS;
  37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD;
  38 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  39 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  40 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  41 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  42 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  43 
  44 import org.graalvm.compiler.asm.amd64.AMD64Address;
  45 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexMoveOp;
  46 import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
  47 import org.graalvm.compiler.asm.amd64.AVXKind;
  48 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
  49 import org.graalvm.compiler.debug.GraalError;
  50 import org.graalvm.compiler.lir.LIRFrameState;
  51 import org.graalvm.compiler.lir.LIRInstructionClass;


 249     public static class VectorStoreOp extends VectorMemOp {
 250         public static final LIRInstructionClass<VectorStoreOp> TYPE = LIRInstructionClass.create(VectorStoreOp.class);
 251 
 252         @Use({REG}) protected AllocatableValue input;
 253 
 254         public VectorStoreOp(AVXSize size, VexMoveOp op, AMD64AddressValue address, AllocatableValue input, LIRFrameState state) {
 255             super(TYPE, size, op, address, state);
 256             this.input = input;
 257         }
 258 
 259         @Override
 260         public void emitMemAccess(AMD64MacroAssembler masm) {
 261             op.emit(masm, size, address.toAddress(), asRegister(input));
 262         }
 263     }
 264 
 265     @Opcode("SAVE_REGISTER")
 266     public static class SaveRegistersOp extends AMD64SaveRegistersOp {
 267         public static final LIRInstructionClass<SaveRegistersOp> TYPE = LIRInstructionClass.create(SaveRegistersOp.class);
 268 
 269         public SaveRegistersOp(Register[] savedRegisters, AllocatableValue[] slots) {
 270             super(TYPE, savedRegisters, slots);
 271         }
 272 
 273         @Override
 274         protected void saveRegister(CompilationResultBuilder crb, AMD64MacroAssembler masm, StackSlot result, Register register) {
 275             AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
 276             if (kind.isXMM()) {
 277                 VexMoveOp op;
 278                 if (kind.getVectorLength() > 1) {
 279                     op = getVectorMoveOp(kind.getScalar());
 280                 } else {
 281                     op = getScalarMoveOp(kind);
 282                 }
 283 
 284                 AMD64Address addr = (AMD64Address) crb.asAddress(result);
 285                 op.emit(masm, AVXKind.getRegisterSize(kind), addr, register);
 286             } else {
 287                 super.saveRegister(crb, masm, result, register);
 288             }
 289         }
 290     }
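
In this revision SaveRegistersOp loses the supportsRemove flag, so callers
construct it from just the saved registers and their stack slots. A purely
illustrative call site (buildSaveSlots is a hypothetical placeholder, not a
Graal helper; real callers obtain their slots from the frame map, and the
register choice here is arbitrary):

    import jdk.vm.ci.amd64.AMD64;
    import jdk.vm.ci.code.Register;
    import jdk.vm.ci.meta.AllocatableValue;
    import org.graalvm.compiler.lir.amd64.vector.AMD64VectorMove;

    final class SaveRegistersExample {
        // Placeholder: real code allocates one spill slot per saved register.
        static AllocatableValue[] buildSaveSlots(Register[] saved) {
            return new AllocatableValue[saved.length];
        }

        static AMD64VectorMove.SaveRegistersOp makeSave() {
            Register[] saved = {AMD64.xmm0, AMD64.xmm1};
            AllocatableValue[] slots = buildSaveSlots(saved);
            // Previously: new SaveRegistersOp(saved, slots, /* supportsRemove */ false)
            return new AMD64VectorMove.SaveRegistersOp(saved, slots);
        }
    }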


 317     }
 318 
 319     private static VexMoveOp getScalarMoveOp(AMD64Kind kind) {
 320         switch (kind) {
 321             case SINGLE:
 322                 return VMOVSS;
 323             case DOUBLE:
 324                 return VMOVSD;
 325             default:
 326                 throw GraalError.shouldNotReachHere();
 327         }
 328     }
 329 
 330     private static VexMoveOp getVectorMoveOp(AMD64Kind kind) {
 331         switch (kind) {
 332             case SINGLE:
 333                 return VMOVUPS;
 334             case DOUBLE:
 335                 return VMOVUPD;
 336             default:
 337                 return VMOVDQU32;
 338         }
 339     }
 340 
 341     private static VexMoveOp getVectorMemMoveOp(AMD64Kind kind) {
 342         switch (AVXKind.getDataSize(kind)) {
 343             case DWORD:
 344                 return VMOVD;
 345             case QWORD:
 346                 return VMOVQ;
 347             default:
 348                 return getVectorMoveOp(kind.getScalar());
 349         }
 350     }
 351 
 352     private static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, AllocatableValue result, Value input) {
 353         VexMoveOp op;
 354         AVXSize size;
 355         AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
 356         if (kind.getVectorLength() > 1) {
 357             size = AVXKind.getRegisterSize(kind);

