src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64ArithmeticLIRGenerator.java

rev 52889 : 8214023: Update Graal
   1 /*
   2  * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */


  36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF;
  37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR;
  38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT;
  39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV;
  40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD;
  41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS;
  42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX;
  43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB;
  44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD;
  45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX;
  46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB;
  47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT;
  48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST;
  49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB;
  50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT;
  51 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL;
  52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR;
  53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR;
  54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL;
  55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR;












  56 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE;
  57 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
  58 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
  59 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
  60 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
  61 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD;
  62 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS;
  63 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD;
  64 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  65 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
  66 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  67 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
  68 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  69 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM;
  70 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM;
  71 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW;
  72 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.COS;
  73 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP;
  74 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG;
  75 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG10;
  76 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.SIN;
  77 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.TAN;
  78 
  79 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
  80 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  81 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
  82 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp;
  83 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp;
  84 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
  85 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift;
  86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
  87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp;
  88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp;


  89 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;

  90 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
  91 import org.graalvm.compiler.core.common.LIRKind;
  92 import org.graalvm.compiler.core.common.NumUtil;
  93 import org.graalvm.compiler.core.common.calc.FloatConvert;
  94 import org.graalvm.compiler.debug.GraalError;
  95 import org.graalvm.compiler.lir.ConstantValue;
  96 import org.graalvm.compiler.lir.LIRFrameState;
  97 import org.graalvm.compiler.lir.LIRValueUtil;
  98 import org.graalvm.compiler.lir.Variable;
  99 import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
 100 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp;
 101 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
 102 import org.graalvm.compiler.lir.amd64.AMD64Binary;
 103 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
 104 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp;
 105 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp;
 106 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp;
 107 import org.graalvm.compiler.lir.amd64.AMD64Move;
 108 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp;
 109 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp;
 110 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp;
 111 import org.graalvm.compiler.lir.amd64.AMD64Unary;
 112 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary;

 113 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary;
 114 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator;
 115 import org.graalvm.compiler.lir.gen.LIRGenerator;
 116 
 117 import jdk.vm.ci.amd64.AMD64;

 118 import jdk.vm.ci.amd64.AMD64Kind;
 119 import jdk.vm.ci.code.CodeUtil;
 120 import jdk.vm.ci.code.Register;
 121 import jdk.vm.ci.code.RegisterValue;

 122 import jdk.vm.ci.meta.AllocatableValue;
 123 import jdk.vm.ci.meta.Constant;
 124 import jdk.vm.ci.meta.JavaConstant;
 125 import jdk.vm.ci.meta.JavaKind;
 126 import jdk.vm.ci.meta.PlatformKind;
 127 import jdk.vm.ci.meta.VMConstant;
 128 import jdk.vm.ci.meta.Value;
 129 import jdk.vm.ci.meta.ValueKind;
 130 
 131 /**
 132  * This class implements the AMD64 specific portion of the LIR generator.
 133  */
 134 public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool {
 135 
 136     private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));
 137 
 138     public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue, Maths maths) {
 139         this.nullRegisterValue = nullRegisterValue;
 140         this.maths = maths == null ? new Maths() {
 141         } : maths;


 158         @SuppressWarnings("unused")
 159         default Variable emitCos(LIRGenerator gen, Value input) {
 160             return null;
 161         }
 162 
 163         @SuppressWarnings("unused")
 164         default Variable emitSin(LIRGenerator gen, Value input) {
 165             return null;
 166         }
 167 
 168         @SuppressWarnings("unused")
 169         default Variable emitTan(LIRGenerator gen, Value input) {
 170             return null;
 171         }
 172     }
 173 
 174     @Override
 175     public Variable emitNegate(Value inputVal) {
 176         AllocatableValue input = asAllocatable(inputVal);
 177         Variable result = getLIRGen().newVariable(LIRKind.combine(input));

 178         switch ((AMD64Kind) input.getPlatformKind()) {
 179             case DWORD:
 180                 getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
 181                 break;
 182             case QWORD:
 183                 getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
 184                 break;
 185             case SINGLE:
 186                 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)), 16));





 187                 break;
 188             case DOUBLE:
 189                 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)), 16));





 190                 break;
 191             default:
 192                 throw GraalError.shouldNotReachHere(input.getPlatformKind().toString());
 193         }
 194         return result;
 195     }
 196 
 197     @Override
 198     public Variable emitNot(Value inputVal) {
 199         AllocatableValue input = asAllocatable(inputVal);
 200         Variable result = getLIRGen().newVariable(LIRKind.combine(input));
 201         switch ((AMD64Kind) input.getPlatformKind()) {
 202             case DWORD:
 203                 getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input));
 204                 break;
 205             case QWORD:
 206                 getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input));
 207                 break;
 208             default:
 209                 throw GraalError.shouldNotReachHere();


 284         } else {
 285             getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b));
 286         }
 287         return result;
 288     }
 289 
 290     @Override
 291     protected boolean isNumericInteger(PlatformKind kind) {
 292         return ((AMD64Kind) kind).isInteger();
 293     }
 294 
 295     private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) {
 296         Variable result = getLIRGen().newVariable(resultKind);
 297         AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset);
 298         getLIRGen().append(new AMD64Move.LeaOp(result, address, size));
 299         return result;
 300     }
 301 
 302     @Override
 303     public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {

 304         switch ((AMD64Kind) a.getPlatformKind()) {
 305             case DWORD:
 306                 if (isJavaConstant(b) && !setFlags) {
 307                     long displacement = asJavaConstant(b).asLong();
 308                     if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
 309                         return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD);
 310                     }
 311                 }
 312                 return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags);
 313             case QWORD:
 314                 if (isJavaConstant(b) && !setFlags) {
 315                     long displacement = asJavaConstant(b).asLong();
 316                     if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
 317                         return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD);
 318                     }
 319                 }
 320                 return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
 321             case SINGLE:



 322                 return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);

 323             case DOUBLE:



 324                 return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);

 325             default:
 326                 throw GraalError.shouldNotReachHere();
 327         }
 328     }
 329 
 330     @Override
 331     public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {

 332         switch ((AMD64Kind) a.getPlatformKind()) {
 333             case DWORD:
 334                 return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
 335             case QWORD:
 336                 return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
 337             case SINGLE:



 338                 return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);

 339             case DOUBLE:



 340                 return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);

 341             default:
 342                 throw GraalError.shouldNotReachHere();
 343         }
 344     }
 345 
 346     private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) {
 347         long value = b.getJavaConstant().asLong();
 348         if (NumUtil.isInt(value)) {
 349             int imm = (int) value;
 350             AMD64RMIOp op;
 351             if (NumUtil.isByte(imm)) {
 352                 op = AMD64RMIOp.IMUL_SX;
 353             } else {
 354                 op = AMD64RMIOp.IMUL;
 355             }
 356 
 357             Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b));
 358             getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm));
 359             return ret;
 360         } else {
 361             return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b));
 362         }
 363     }
 364 
 365     private Variable emitIMUL(OperandSize size, Value a, Value b) {
 366         if (isJavaConstant(b)) {
 367             return emitIMULConst(size, asAllocatable(a), asConstantValue(b));
 368         } else if (isJavaConstant(a)) {
 369             return emitIMULConst(size, asAllocatable(b), asConstantValue(a));
 370         } else {
 371             return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b));
 372         }
 373     }
 374 
 375     @Override
 376     public Variable emitMul(Value a, Value b, boolean setFlags) {

 377         LIRKind resultKind = LIRKind.combine(a, b);
 378         switch ((AMD64Kind) a.getPlatformKind()) {
 379             case DWORD:
 380                 return emitIMUL(DWORD, a, b);
 381             case QWORD:
 382                 return emitIMUL(QWORD, a, b);
 383             case SINGLE:



 384                 return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);

 385             case DOUBLE:



 386                 return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);

 387             default:
 388                 throw GraalError.shouldNotReachHere();
 389         }
 390     }
 391 
 392     private RegisterValue moveToReg(Register reg, Value v) {
 393         RegisterValue ret = reg.asValue(v.getValueKind());
 394         getLIRGen().emitMove(ret, v);
 395         return ret;
 396     }
 397 
 398     private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
 399         AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b)));
 400         return getLIRGen().emitMove(mulHigh.getHighResult());
 401     }
 402 
 403     @Override
 404     public Value emitMulHigh(Value a, Value b) {
 405         switch ((AMD64Kind) a.getPlatformKind()) {
 406             case DWORD:


 490         return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
 491     }
 492 
 493     public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) {
 494         AMD64MulDivOp op;
 495         switch ((AMD64Kind) a.getPlatformKind()) {
 496             case DWORD:
 497                 op = emitDIV(DWORD, a, b, state);
 498                 break;
 499             case QWORD:
 500                 op = emitDIV(QWORD, a, b, state);
 501                 break;
 502             default:
 503                 throw GraalError.shouldNotReachHere();
 504         }
 505         return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
 506     }
 507 
 508     @Override
 509     public Value emitDiv(Value a, Value b, LIRFrameState state) {

 510         LIRKind resultKind = LIRKind.combine(a, b);
 511         switch ((AMD64Kind) a.getPlatformKind()) {
 512             case DWORD:
 513                 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
 514                 return getLIRGen().emitMove(op.getQuotient());
 515             case QWORD:
 516                 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
 517                 return getLIRGen().emitMove(lop.getQuotient());
 518             case SINGLE:



 519                 return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);

 520             case DOUBLE:



 521                 return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);

 522             default:
 523                 throw GraalError.shouldNotReachHere();
 524         }
 525     }
 526 
 527     @Override
 528     public Value emitRem(Value a, Value b, LIRFrameState state) {
 529         switch ((AMD64Kind) a.getPlatformKind()) {
 530             case DWORD:
 531                 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
 532                 return getLIRGen().emitMove(op.getRemainder());
 533             case QWORD:
 534                 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
 535                 return getLIRGen().emitMove(lop.getRemainder());
 536             case SINGLE: {
 537                 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
 538                 getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b)));
 539                 return result;
 540             }
 541             case DOUBLE: {


 582 
 583     @Override
 584     public Variable emitAnd(Value a, Value b) {
 585         LIRKind resultKind = LIRKind.combine(a, b);
 586         switch ((AMD64Kind) a.getPlatformKind()) {
 587             case DWORD:
 588                 return emitBinary(resultKind, AND, DWORD, true, a, b, false);
 589             case QWORD:
 590                 return emitBinary(resultKind, AND, QWORD, true, a, b, false);
 591             case SINGLE:
 592                 return emitBinary(resultKind, SSEOp.AND, PS, true, a, b);
 593             case DOUBLE:
 594                 return emitBinary(resultKind, SSEOp.AND, PD, true, a, b);
 595             default:
 596                 throw GraalError.shouldNotReachHere();
 597         }
 598     }
 599 
 600     @Override
 601     public Variable emitOr(Value a, Value b) {

 602         LIRKind resultKind = LIRKind.combine(a, b);
 603         switch ((AMD64Kind) a.getPlatformKind()) {
 604             case DWORD:
 605                 return emitBinary(resultKind, OR, DWORD, true, a, b, false);
 606             case QWORD:
 607                 return emitBinary(resultKind, OR, QWORD, true, a, b, false);
 608             case SINGLE:



 609                 return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);

 610             case DOUBLE:



 611                 return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);

 612             default:
 613                 throw GraalError.shouldNotReachHere();
 614         }
 615     }
 616 
 617     @Override
 618     public Variable emitXor(Value a, Value b) {

 619         LIRKind resultKind = LIRKind.combine(a, b);
 620         switch ((AMD64Kind) a.getPlatformKind()) {
 621             case DWORD:
 622                 return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
 623             case QWORD:
 624                 return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
 625             case SINGLE:



 626                 return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);

 627             case DOUBLE:



 628                 return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);

 629             default:
 630                 throw GraalError.shouldNotReachHere();
 631         }
 632     }
 633 
 634     private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
 635         Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind()));
 636         AllocatableValue input = asAllocatable(a);
 637         if (isJavaConstant(b)) {
 638             JavaConstant c = asJavaConstant(b);
 639             if (c.asLong() == 1) {
 640                 getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input));
 641             } else {
 642                 /*
 643                  * c is implicitly masked to 5 or 6 bits by the CPU, so casting it to (int) is
 644                  * always correct, even without the NumUtil.is32bit() test.
 645                  */
 646                 getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (int) c.asLong()));
 647             }
 648         } else {


1275                     return;
1276                 }
1277             }
1278         }
1279 
1280         // fallback: load, then compare
1281         getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right)));
1282     }
1283 
1284     @Override
1285     public Value emitRound(Value value, RoundingMode mode) {
1286         Variable result = getLIRGen().newVariable(LIRKind.combine(value));
1287         assert ((AMD64Kind) value.getPlatformKind()).isXMM();
1288         if (value.getPlatformKind() == AMD64Kind.SINGLE) {
1289             getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding));
1290         } else {
1291             getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
1292         }
1293         return result;
1294     }





















1295 }
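
Both revisions negate SINGLE and DOUBLE values by XOR-ing the IEEE-754 sign bit (masks 0x80000000 and 0x8000000000000000L) rather than subtracting from zero; the updated listing below only changes which instruction performs that XOR (VXORPS/VXORPD under AVX instead of XORPS/XORPD). A minimal standalone sketch of the trick, using hypothetical helper names that are not part of the Graal sources:

    static float negateViaSignBit(float x) {
        // Flipping bit 31 negates the value, including -0.0f.
        return Float.intBitsToFloat(Float.floatToRawIntBits(x) ^ 0x80000000);
    }

    static double negateViaSignBit(double x) {
        // Same idea for doubles: bit 63 is the sign bit.
        return Double.longBitsToDouble(Double.doubleToRawLongBits(x) ^ 0x8000000000000000L);
    }
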
   1 /*
   2  * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */


  36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF;
  37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR;
  38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT;
  39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV;
  40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD;
  41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS;
  42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX;
  43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB;
  44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD;
  45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX;
  46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB;
  47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT;
  48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST;
  49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB;
  50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT;
  51 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL;
  52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR;
  53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR;
  54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL;
  55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR;
  56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD;
  57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS;
  58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD;
  59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS;
  60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD;
  61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS;
  62 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD;
  63 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS;
  64 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD;
  65 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS;
  66 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD;
  67 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS;
  68 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE;
  69 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
  70 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
  71 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
  72 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
  73 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD;
  74 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS;
  75 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD;
  76 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  77 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
  78 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  79 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
  80 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  81 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM;
  82 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM;
  83 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW;
  84 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.COS;
  85 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP;
  86 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG;
  87 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG10;
  88 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.SIN;
  89 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.TAN;
  90 
  91 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
  92 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  93 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
  94 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp;
  95 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp;
  96 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
  97 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift;
  98 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;

  99 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp;
 100 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp;
 101 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp;
 102 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
 103 import org.graalvm.compiler.asm.amd64.AVXKind;
 104 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
 105 import org.graalvm.compiler.core.common.LIRKind;
 106 import org.graalvm.compiler.core.common.NumUtil;
 107 import org.graalvm.compiler.core.common.calc.FloatConvert;
 108 import org.graalvm.compiler.debug.GraalError;
 109 import org.graalvm.compiler.lir.ConstantValue;
 110 import org.graalvm.compiler.lir.LIRFrameState;
 111 import org.graalvm.compiler.lir.LIRValueUtil;
 112 import org.graalvm.compiler.lir.Variable;
 113 import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
 114 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp;
 115 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
 116 import org.graalvm.compiler.lir.amd64.AMD64Binary;
 117 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
 118 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp;
 119 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp;
 120 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp;
 121 import org.graalvm.compiler.lir.amd64.AMD64Move;
 122 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp;
 123 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp;
 124 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp;
 125 import org.graalvm.compiler.lir.amd64.AMD64Unary;
 126 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary;
 127 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp;
 128 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary;
 129 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator;
 130 import org.graalvm.compiler.lir.gen.LIRGenerator;
 131 
 132 import jdk.vm.ci.amd64.AMD64;
 133 import jdk.vm.ci.amd64.AMD64.CPUFeature;
 134 import jdk.vm.ci.amd64.AMD64Kind;
 135 import jdk.vm.ci.code.CodeUtil;
 136 import jdk.vm.ci.code.Register;
 137 import jdk.vm.ci.code.RegisterValue;
 138 import jdk.vm.ci.code.TargetDescription;
 139 import jdk.vm.ci.meta.AllocatableValue;
 140 import jdk.vm.ci.meta.Constant;
 141 import jdk.vm.ci.meta.JavaConstant;
 142 import jdk.vm.ci.meta.JavaKind;
 143 import jdk.vm.ci.meta.PlatformKind;
 144 import jdk.vm.ci.meta.VMConstant;
 145 import jdk.vm.ci.meta.Value;
 146 import jdk.vm.ci.meta.ValueKind;
 147 
 148 /**
 149  * This class implements the AMD64 specific portion of the LIR generator.
 150  */
 151 public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool {
 152 
 153     private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));
 154 
 155     public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue, Maths maths) {
 156         this.nullRegisterValue = nullRegisterValue;
 157         this.maths = maths == null ? new Maths() {
 158         } : maths;


 175         @SuppressWarnings("unused")
 176         default Variable emitCos(LIRGenerator gen, Value input) {
 177             return null;
 178         }
 179 
 180         @SuppressWarnings("unused")
 181         default Variable emitSin(LIRGenerator gen, Value input) {
 182             return null;
 183         }
 184 
 185         @SuppressWarnings("unused")
 186         default Variable emitTan(LIRGenerator gen, Value input) {
 187             return null;
 188         }
 189     }
 190 
 191     @Override
 192     public Variable emitNegate(Value inputVal) {
 193         AllocatableValue input = asAllocatable(inputVal);
 194         Variable result = getLIRGen().newVariable(LIRKind.combine(input));
 195         boolean isAvx = supportAVX();
 196         switch ((AMD64Kind) input.getPlatformKind()) {
 197             case DWORD:
 198                 getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
 199                 break;
 200             case QWORD:
 201                 getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
 202                 break;
 203             case SINGLE:
 204                 JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000));
 205                 if (isAvx) {
 206                     getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask))));
 207                 } else {
 208                     getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16));
 209                 }
 210                 break;
 211             case DOUBLE:
 212                 JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L));
 213                 if (isAvx) {
 214                     getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask))));
 215                 } else {
 216                     getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16));
 217                 }
 218                 break;
 219             default:
 220                 throw GraalError.shouldNotReachHere(input.getPlatformKind().toString());
 221         }
 222         return result;
 223     }
 224 
 225     @Override
 226     public Variable emitNot(Value inputVal) {
 227         AllocatableValue input = asAllocatable(inputVal);
 228         Variable result = getLIRGen().newVariable(LIRKind.combine(input));
 229         switch ((AMD64Kind) input.getPlatformKind()) {
 230             case DWORD:
 231                 getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input));
 232                 break;
 233             case QWORD:
 234                 getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input));
 235                 break;
 236             default:
 237                 throw GraalError.shouldNotReachHere();


 312         } else {
 313             getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b));
 314         }
 315         return result;
 316     }
 317 
 318     @Override
 319     protected boolean isNumericInteger(PlatformKind kind) {
 320         return ((AMD64Kind) kind).isInteger();
 321     }
 322 
 323     private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) {
 324         Variable result = getLIRGen().newVariable(resultKind);
 325         AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset);
 326         getLIRGen().append(new AMD64Move.LeaOp(result, address, size));
 327         return result;
 328     }
 329 
 330     @Override
 331     public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
 332         boolean isAvx = supportAVX();
 333         switch ((AMD64Kind) a.getPlatformKind()) {
 334             case DWORD:
 335                 if (isJavaConstant(b) && !setFlags) {
 336                     long displacement = asJavaConstant(b).asLong();
 337                     if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
 338                         return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD);
 339                     }
 340                 }
 341                 return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags);
 342             case QWORD:
 343                 if (isJavaConstant(b) && !setFlags) {
 344                     long displacement = asJavaConstant(b).asLong();
 345                     if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
 346                         return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD);
 347                     }
 348                 }
 349                 return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
 350             case SINGLE:
 351                 if (isAvx) {
 352                     return emitBinary(resultKind, VADDSS, a, b);
 353                 } else {
 354                     return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);
 355                 }
 356             case DOUBLE:
 357                 if (isAvx) {
 358                     return emitBinary(resultKind, VADDSD, a, b);
 359                 } else {
 360                     return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);
 361                 }
 362             default:
 363                 throw GraalError.shouldNotReachHere();
 364         }
 365     }
 366 
 367     @Override
 368     public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
 369         boolean isAvx = supportAVX();
 370         switch ((AMD64Kind) a.getPlatformKind()) {
 371             case DWORD:
 372                 return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
 373             case QWORD:
 374                 return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
 375             case SINGLE:
 376                 if (isAvx) {
 377                     return emitBinary(resultKind, VSUBSS, a, b);
 378                 } else {
 379                     return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);
 380                 }
 381             case DOUBLE:
 382                 if (isAvx) {
 383                     return emitBinary(resultKind, VSUBSD, a, b);
 384                 } else {
 385                     return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);
 386                 }
 387             default:
 388                 throw GraalError.shouldNotReachHere();
 389         }
 390     }
 391 
 392     private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) {
 393         long value = b.getJavaConstant().asLong();
 394         if (NumUtil.isInt(value)) {
 395             int imm = (int) value;
 396             AMD64RMIOp op;
 397             if (NumUtil.isByte(imm)) {
 398                 op = AMD64RMIOp.IMUL_SX;
 399             } else {
 400                 op = AMD64RMIOp.IMUL;
 401             }
 402 
 403             Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b));
 404             getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm));
 405             return ret;
 406         } else {
 407             return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b));
 408         }
 409     }
 410 
 411     private Variable emitIMUL(OperandSize size, Value a, Value b) {
 412         if (isJavaConstant(b)) {
 413             return emitIMULConst(size, asAllocatable(a), asConstantValue(b));
 414         } else if (isJavaConstant(a)) {
 415             return emitIMULConst(size, asAllocatable(b), asConstantValue(a));
 416         } else {
 417             return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b));
 418         }
 419     }
 420 
 421     @Override
 422     public Variable emitMul(Value a, Value b, boolean setFlags) {
 423         boolean isAvx = supportAVX();
 424         LIRKind resultKind = LIRKind.combine(a, b);
 425         switch ((AMD64Kind) a.getPlatformKind()) {
 426             case DWORD:
 427                 return emitIMUL(DWORD, a, b);
 428             case QWORD:
 429                 return emitIMUL(QWORD, a, b);
 430             case SINGLE:
 431                 if (isAvx) {
 432                     return emitBinary(resultKind, VMULSS, a, b);
 433                 } else {
 434                     return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);
 435                 }
 436             case DOUBLE:
 437                 if (isAvx) {
 438                     return emitBinary(resultKind, VMULSD, a, b);
 439                 } else {
 440                     return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);
 441                 }
 442             default:
 443                 throw GraalError.shouldNotReachHere();
 444         }
 445     }
 446 
 447     private RegisterValue moveToReg(Register reg, Value v) {
 448         RegisterValue ret = reg.asValue(v.getValueKind());
 449         getLIRGen().emitMove(ret, v);
 450         return ret;
 451     }
 452 
 453     private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
 454         AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b)));
 455         return getLIRGen().emitMove(mulHigh.getHighResult());
 456     }
 457 
 458     @Override
 459     public Value emitMulHigh(Value a, Value b) {
 460         switch ((AMD64Kind) a.getPlatformKind()) {
 461             case DWORD:


 545         return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
 546     }
 547 
 548     public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) {
 549         AMD64MulDivOp op;
 550         switch ((AMD64Kind) a.getPlatformKind()) {
 551             case DWORD:
 552                 op = emitDIV(DWORD, a, b, state);
 553                 break;
 554             case QWORD:
 555                 op = emitDIV(QWORD, a, b, state);
 556                 break;
 557             default:
 558                 throw GraalError.shouldNotReachHere();
 559         }
 560         return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
 561     }
 562 
 563     @Override
 564     public Value emitDiv(Value a, Value b, LIRFrameState state) {
 565         boolean isAvx = supportAVX();
 566         LIRKind resultKind = LIRKind.combine(a, b);
 567         switch ((AMD64Kind) a.getPlatformKind()) {
 568             case DWORD:
 569                 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
 570                 return getLIRGen().emitMove(op.getQuotient());
 571             case QWORD:
 572                 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
 573                 return getLIRGen().emitMove(lop.getQuotient());
 574             case SINGLE:
 575                 if (isAvx) {
 576                     return emitBinary(resultKind, VDIVSS, a, b);
 577                 } else {
 578                     return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);
 579                 }
 580             case DOUBLE:
 581                 if (isAvx) {
 582                     return emitBinary(resultKind, VDIVSD, a, b);
 583                 } else {
 584                     return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);
 585                 }
 586             default:
 587                 throw GraalError.shouldNotReachHere();
 588         }
 589     }
 590 
 591     @Override
 592     public Value emitRem(Value a, Value b, LIRFrameState state) {
 593         switch ((AMD64Kind) a.getPlatformKind()) {
 594             case DWORD:
 595                 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
 596                 return getLIRGen().emitMove(op.getRemainder());
 597             case QWORD:
 598                 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
 599                 return getLIRGen().emitMove(lop.getRemainder());
 600             case SINGLE: {
 601                 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
 602                 getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b)));
 603                 return result;
 604             }
 605             case DOUBLE: {


 646 
 647     @Override
 648     public Variable emitAnd(Value a, Value b) {
 649         LIRKind resultKind = LIRKind.combine(a, b);
 650         switch ((AMD64Kind) a.getPlatformKind()) {
 651             case DWORD:
 652                 return emitBinary(resultKind, AND, DWORD, true, a, b, false);
 653             case QWORD:
 654                 return emitBinary(resultKind, AND, QWORD, true, a, b, false);
 655             case SINGLE:
 656                 return emitBinary(resultKind, SSEOp.AND, PS, true, a, b);
 657             case DOUBLE:
 658                 return emitBinary(resultKind, SSEOp.AND, PD, true, a, b);
 659             default:
 660                 throw GraalError.shouldNotReachHere();
 661         }
 662     }
 663 
 664     @Override
 665     public Variable emitOr(Value a, Value b) {
 666         boolean isAvx = supportAVX();
 667         LIRKind resultKind = LIRKind.combine(a, b);
 668         switch ((AMD64Kind) a.getPlatformKind()) {
 669             case DWORD:
 670                 return emitBinary(resultKind, OR, DWORD, true, a, b, false);
 671             case QWORD:
 672                 return emitBinary(resultKind, OR, QWORD, true, a, b, false);
 673             case SINGLE:
 674                 if (isAvx) {
 675                     return emitBinary(resultKind, VORPS, a, b);
 676                 } else {
 677                     return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);
 678                 }
 679             case DOUBLE:
 680                 if (isAvx) {
 681                     return emitBinary(resultKind, VORPD, a, b);
 682                 } else {
 683                     return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);
 684                 }
 685             default:
 686                 throw GraalError.shouldNotReachHere();
 687         }
 688     }
 689 
 690     @Override
 691     public Variable emitXor(Value a, Value b) {
 692         boolean isAvx = supportAVX();
 693         LIRKind resultKind = LIRKind.combine(a, b);
 694         switch ((AMD64Kind) a.getPlatformKind()) {
 695             case DWORD:
 696                 return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
 697             case QWORD:
 698                 return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
 699             case SINGLE:
 700                 if (isAvx) {
 701                     return emitBinary(resultKind, VXORPS, a, b);
 702                 } else {
 703                     return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);
 704                 }
 705             case DOUBLE:
 706                 if (isAvx) {
 707                     return emitBinary(resultKind, VXORPD, a, b);
 708                 } else {
 709                     return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);
 710                 }
 711             default:
 712                 throw GraalError.shouldNotReachHere();
 713         }
 714     }
 715 
 716     private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
 717         Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind()));
 718         AllocatableValue input = asAllocatable(a);
 719         if (isJavaConstant(b)) {
 720             JavaConstant c = asJavaConstant(b);
 721             if (c.asLong() == 1) {
 722                 getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input));
 723             } else {
 724                 /*
 725                  * c is implicitly masked to 5 or 6 bits by the CPU, so casting it to (int) is
 726                  * always correct, even without the NumUtil.is32bit() test.
 727                  */
 728                 getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (int) c.asLong()));
 729             }
 730         } else {


1357                     return;
1358                 }
1359             }
1360         }
1361 
1362         // fallback: load, then compare
1363         getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right)));
1364     }
1365 
1366     @Override
1367     public Value emitRound(Value value, RoundingMode mode) {
1368         Variable result = getLIRGen().newVariable(LIRKind.combine(value));
1369         assert ((AMD64Kind) value.getPlatformKind()).isXMM();
1370         if (value.getPlatformKind() == AMD64Kind.SINGLE) {
1371             getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding));
1372         } else {
1373             getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
1374         }
1375         return result;
1376     }
1377 
1378     private boolean supportAVX() {
1379         TargetDescription target = getLIRGen().target();
1380         return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
1381     }
1382 
1383     private static AVXSize getRegisterSize(Value a) {
1384         AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
1385         if (kind.isXMM()) {
1386             return AVXKind.getRegisterSize(kind);
1387         } else {
1388             return AVXSize.XMM;
1389         }
1390     }
1391 
1392     private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) {
1393         Variable result = getLIRGen().newVariable(resultKind);
1394         getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b)));
1395         return result;
1396     }
1397 
1398 }
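
The new supportAVX()/emitBinary(LIRKind, VexRVMOp, Value, Value) pair above is the core of this change: when the target CPU reports AVX, scalar floating-point add/sub/mul/div and the packed logical ops are emitted in their non-destructive three-operand VEX form instead of the two-operand SSE form. A hedged sketch of the same JVMCI feature query outside the generator; the class and helper name, and the way the TargetDescription is obtained, are illustrative only:

    import jdk.vm.ci.amd64.AMD64;
    import jdk.vm.ci.amd64.AMD64.CPUFeature;
    import jdk.vm.ci.code.TargetDescription;

    final class AvxProbe {
        // Mirrors supportAVX() in the listing above: the backend's TargetDescription
        // exposes the JVMCI architecture object, whose feature set includes AVX
        // when the processor supports the VEX encodings.
        static boolean avxAvailable(TargetDescription target) {
            return target.arch instanceof AMD64
                    && ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        }
    }
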