1 /* 2 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 25 26 package org.graalvm.compiler.core.amd64; 27 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 51 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 56 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE; 57 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; 58 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD; 59 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS; 60 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD; 61 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD; 62 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS; 63 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD; 64 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 65 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 66 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 67 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 68 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 69 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 70 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 71 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW; 72 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.COS; 73 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP; 74 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG; 75 import static 
org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG10; 76 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.SIN; 77 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.TAN; 78 79 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 80 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 81 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 82 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 83 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 84 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 85 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp; 88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp; 89 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize; 90 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; 91 import org.graalvm.compiler.core.common.LIRKind; 92 import org.graalvm.compiler.core.common.NumUtil; 93 import org.graalvm.compiler.core.common.calc.FloatConvert; 94 import org.graalvm.compiler.debug.GraalError; 95 import org.graalvm.compiler.lir.ConstantValue; 96 import org.graalvm.compiler.lir.LIRFrameState; 97 import org.graalvm.compiler.lir.LIRValueUtil; 98 import org.graalvm.compiler.lir.Variable; 99 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 100 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 101 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 102 import org.graalvm.compiler.lir.amd64.AMD64Binary; 103 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 104 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 105 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp; 106 import 
org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp; 107 import org.graalvm.compiler.lir.amd64.AMD64Move; 108 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp; 109 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp; 110 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp; 111 import org.graalvm.compiler.lir.amd64.AMD64Unary; 112 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary; 113 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary; 114 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator; 115 import org.graalvm.compiler.lir.gen.LIRGenerator; 116 117 import jdk.vm.ci.amd64.AMD64; 118 import jdk.vm.ci.amd64.AMD64Kind; 119 import jdk.vm.ci.code.CodeUtil; 120 import jdk.vm.ci.code.Register; 121 import jdk.vm.ci.code.RegisterValue; 122 import jdk.vm.ci.meta.AllocatableValue; 123 import jdk.vm.ci.meta.Constant; 124 import jdk.vm.ci.meta.JavaConstant; 125 import jdk.vm.ci.meta.JavaKind; 126 import jdk.vm.ci.meta.PlatformKind; 127 import jdk.vm.ci.meta.VMConstant; 128 import jdk.vm.ci.meta.Value; 129 import jdk.vm.ci.meta.ValueKind; 130 131 /** 132 * This class implements the AMD64 specific portion of the LIR generator. 133 */ 134 public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool { 135 136 private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD)); 137 138 public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue, Maths maths) { 139 this.nullRegisterValue = nullRegisterValue; 140 this.maths = maths == null ? new Maths() { 141 } : maths; 142 } 143 144 private final AllocatableValue nullRegisterValue; 145 private final Maths maths; 146 147 /** 148 * Interface for emitting LIR for selected {@link Math} routines. A {@code null} return value 149 * for any method in this interface means the caller must emit the LIR itself. 
150 */ 151 public interface Maths { 152 153 @SuppressWarnings("unused") 154 default Variable emitLog(LIRGenerator gen, Value input, boolean base10) { 155 return null; 156 } 157 158 @SuppressWarnings("unused") 159 default Variable emitCos(LIRGenerator gen, Value input) { 160 return null; 161 } 162 163 @SuppressWarnings("unused") 164 default Variable emitSin(LIRGenerator gen, Value input) { 165 return null; 166 } 167 168 @SuppressWarnings("unused") 169 default Variable emitTan(LIRGenerator gen, Value input) { 170 return null; 171 } 172 } 173 174 @Override 175 public Variable emitNegate(Value inputVal) { 176 AllocatableValue input = asAllocatable(inputVal); 177 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 178 switch ((AMD64Kind) input.getPlatformKind()) { 179 case DWORD: 180 getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input)); 181 break; 182 case QWORD: 183 getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input)); 184 break; 185 case SINGLE: 186 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)), 16)); 187 break; 188 case DOUBLE: 189 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)), 16)); 190 break; 191 default: 192 throw GraalError.shouldNotReachHere(input.getPlatformKind().toString()); 193 } 194 return result; 195 } 196 197 @Override 198 public Variable emitNot(Value inputVal) { 199 AllocatableValue input = asAllocatable(inputVal); 200 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 201 switch ((AMD64Kind) input.getPlatformKind()) { 202 case DWORD: 203 getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input)); 204 break; 205 case QWORD: 206 getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input)); 207 break; 208 default: 209 throw GraalError.shouldNotReachHere(); 210 } 211 return result; 212 } 213 214 private 
Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) { 215 if (isJavaConstant(b)) { 216 return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(a), asConstantValue(b), setFlags); 217 } else if (commutative && isJavaConstant(a)) { 218 return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(b), asConstantValue(a), setFlags); 219 } else { 220 return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b)); 221 } 222 } 223 224 private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) { 225 if (isJavaConstant(b)) { 226 return emitBinaryConst(resultKind, op, size, asAllocatable(a), asJavaConstant(b)); 227 } else if (commutative && isJavaConstant(a)) { 228 return emitBinaryConst(resultKind, op, size, asAllocatable(b), asJavaConstant(a)); 229 } else { 230 return emitBinaryVar(resultKind, op, size, commutative, asAllocatable(a), asAllocatable(b)); 231 } 232 } 233 234 private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) { 235 long value = b.getJavaConstant().asLong(); 236 if (NumUtil.isInt(value)) { 237 Variable result = getLIRGen().newVariable(resultKind); 238 int constant = (int) value; 239 240 if (!setFlags) { 241 AMD64MOp mop = getMOp(op, constant); 242 if (mop != null) { 243 getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a)); 244 return result; 245 } 246 } 247 248 getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant)); 249 return result; 250 } else { 251 return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, asAllocatable(b)); 252 } 253 } 254 255 private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) { 256 if (constant == 1) { 257 if 
(op.equals(AMD64BinaryArithmetic.ADD)) { 258 return AMD64MOp.INC; 259 } 260 if (op.equals(AMD64BinaryArithmetic.SUB)) { 261 return AMD64MOp.DEC; 262 } 263 } else if (constant == -1) { 264 if (op.equals(AMD64BinaryArithmetic.ADD)) { 265 return AMD64MOp.DEC; 266 } 267 if (op.equals(AMD64BinaryArithmetic.SUB)) { 268 return AMD64MOp.INC; 269 } 270 } 271 return null; 272 } 273 274 private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) { 275 Variable result = getLIRGen().newVariable(resultKind); 276 getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b)); 277 return result; 278 } 279 280 private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) { 281 Variable result = getLIRGen().newVariable(resultKind); 282 if (commutative) { 283 getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b)); 284 } else { 285 getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b)); 286 } 287 return result; 288 } 289 290 @Override 291 protected boolean isNumericInteger(PlatformKind kind) { 292 return ((AMD64Kind) kind).isInteger(); 293 } 294 295 private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) { 296 Variable result = getLIRGen().newVariable(resultKind); 297 AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset); 298 getLIRGen().append(new AMD64Move.LeaOp(result, address, size)); 299 return result; 300 } 301 302 @Override 303 public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) { 304 switch ((AMD64Kind) a.getPlatformKind()) { 305 case DWORD: 306 if (isJavaConstant(b) && !setFlags) { 307 long displacement = asJavaConstant(b).asLong(); 308 if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) { 309 return emitBaseOffsetLea(resultKind, a, (int) displacement, 
OperandSize.DWORD); 310 } 311 } 312 return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags); 313 case QWORD: 314 if (isJavaConstant(b) && !setFlags) { 315 long displacement = asJavaConstant(b).asLong(); 316 if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) { 317 return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD); 318 } 319 } 320 return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags); 321 case SINGLE: 322 return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b); 323 case DOUBLE: 324 return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b); 325 default: 326 throw GraalError.shouldNotReachHere(); 327 } 328 } 329 330 @Override 331 public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) { 332 switch ((AMD64Kind) a.getPlatformKind()) { 333 case DWORD: 334 return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags); 335 case QWORD: 336 return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags); 337 case SINGLE: 338 return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b); 339 case DOUBLE: 340 return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b); 341 default: 342 throw GraalError.shouldNotReachHere(); 343 } 344 } 345 346 private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) { 347 long value = b.getJavaConstant().asLong(); 348 if (NumUtil.isInt(value)) { 349 int imm = (int) value; 350 AMD64RMIOp op; 351 if (NumUtil.isByte(imm)) { 352 op = AMD64RMIOp.IMUL_SX; 353 } else { 354 op = AMD64RMIOp.IMUL; 355 } 356 357 Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b)); 358 getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm)); 359 return ret; 360 } else { 361 return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b)); 362 } 363 } 364 365 private Variable emitIMUL(OperandSize size, Value a, Value b) { 366 if (isJavaConstant(b)) { 367 return emitIMULConst(size, asAllocatable(a), 
asConstantValue(b)); 368 } else if (isJavaConstant(a)) { 369 return emitIMULConst(size, asAllocatable(b), asConstantValue(a)); 370 } else { 371 return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b)); 372 } 373 } 374 375 @Override 376 public Variable emitMul(Value a, Value b, boolean setFlags) { 377 LIRKind resultKind = LIRKind.combine(a, b); 378 switch ((AMD64Kind) a.getPlatformKind()) { 379 case DWORD: 380 return emitIMUL(DWORD, a, b); 381 case QWORD: 382 return emitIMUL(QWORD, a, b); 383 case SINGLE: 384 return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b); 385 case DOUBLE: 386 return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b); 387 default: 388 throw GraalError.shouldNotReachHere(); 389 } 390 } 391 392 private RegisterValue moveToReg(Register reg, Value v) { 393 RegisterValue ret = reg.asValue(v.getValueKind()); 394 getLIRGen().emitMove(ret, v); 395 return ret; 396 } 397 398 private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) { 399 AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b))); 400 return getLIRGen().emitMove(mulHigh.getHighResult()); 401 } 402 403 @Override 404 public Value emitMulHigh(Value a, Value b) { 405 switch ((AMD64Kind) a.getPlatformKind()) { 406 case DWORD: 407 return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b); 408 case QWORD: 409 return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b); 410 default: 411 throw GraalError.shouldNotReachHere(); 412 } 413 } 414 415 @Override 416 public Value emitUMulHigh(Value a, Value b) { 417 switch ((AMD64Kind) a.getPlatformKind()) { 418 case DWORD: 419 return emitMulHigh(AMD64MOp.MUL, DWORD, a, b); 420 case QWORD: 421 return emitMulHigh(AMD64MOp.MUL, QWORD, a, b); 422 default: 423 throw GraalError.shouldNotReachHere(); 424 } 425 } 426 427 public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, 
LIRFrameState state) { 428 Variable result = getLIRGen().newVariable(LIRKind.combine(a)); 429 getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state)); 430 return result; 431 } 432 433 protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) { 434 Variable result = getLIRGen().newVariable(LIRKind.value(kind)); 435 getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state)); 436 return result; 437 } 438 439 protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) { 440 // Issue a zero extending load of the proper bit size and set the result to 441 // the proper kind. 442 Variable result = getLIRGen().newVariable(LIRKind.value(resultBits <= 32 ? AMD64Kind.DWORD : AMD64Kind.QWORD)); 443 switch (memoryKind) { 444 case BYTE: 445 getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state)); 446 break; 447 case WORD: 448 getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state)); 449 break; 450 case DWORD: 451 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state)); 452 break; 453 case QWORD: 454 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state)); 455 break; 456 default: 457 throw GraalError.shouldNotReachHere(); 458 } 459 return result; 460 } 461 462 private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) { 463 LIRKind kind = LIRKind.combine(a, b); 464 465 AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a))); 466 return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state)); 467 } 468 469 private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) { 470 LIRKind kind = LIRKind.combine(a, b); 471 472 
RegisterValue rax = moveToReg(AMD64.rax, a); 473 RegisterValue rdx = AMD64.rdx.asValue(kind); 474 getLIRGen().append(new AMD64ClearRegisterOp(size, rdx)); 475 return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state)); 476 } 477 478 public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) { 479 AMD64MulDivOp op; 480 switch ((AMD64Kind) a.getPlatformKind()) { 481 case DWORD: 482 op = emitIDIV(DWORD, a, b, state); 483 break; 484 case QWORD: 485 op = emitIDIV(QWORD, a, b, state); 486 break; 487 default: 488 throw GraalError.shouldNotReachHere(); 489 } 490 return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())}; 491 } 492 493 public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) { 494 AMD64MulDivOp op; 495 switch ((AMD64Kind) a.getPlatformKind()) { 496 case DWORD: 497 op = emitDIV(DWORD, a, b, state); 498 break; 499 case QWORD: 500 op = emitDIV(QWORD, a, b, state); 501 break; 502 default: 503 throw GraalError.shouldNotReachHere(); 504 } 505 return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())}; 506 } 507 508 @Override 509 public Value emitDiv(Value a, Value b, LIRFrameState state) { 510 LIRKind resultKind = LIRKind.combine(a, b); 511 switch ((AMD64Kind) a.getPlatformKind()) { 512 case DWORD: 513 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state); 514 return getLIRGen().emitMove(op.getQuotient()); 515 case QWORD: 516 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state); 517 return getLIRGen().emitMove(lop.getQuotient()); 518 case SINGLE: 519 return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b); 520 case DOUBLE: 521 return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b); 522 default: 523 throw GraalError.shouldNotReachHere(); 524 } 525 } 526 527 @Override 528 public Value emitRem(Value a, Value b, LIRFrameState state) { 529 switch ((AMD64Kind) a.getPlatformKind()) { 530 case DWORD: 531 AMD64MulDivOp op 
= emitIDIV(DWORD, a, b, state); 532 return getLIRGen().emitMove(op.getRemainder()); 533 case QWORD: 534 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state); 535 return getLIRGen().emitMove(lop.getRemainder()); 536 case SINGLE: { 537 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b)); 538 getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b))); 539 return result; 540 } 541 case DOUBLE: { 542 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b)); 543 getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b))); 544 return result; 545 } 546 default: 547 throw GraalError.shouldNotReachHere(); 548 } 549 } 550 551 @Override 552 public Variable emitUDiv(Value a, Value b, LIRFrameState state) { 553 AMD64MulDivOp op; 554 switch ((AMD64Kind) a.getPlatformKind()) { 555 case DWORD: 556 op = emitDIV(DWORD, a, b, state); 557 break; 558 case QWORD: 559 op = emitDIV(QWORD, a, b, state); 560 break; 561 default: 562 throw GraalError.shouldNotReachHere(); 563 } 564 return getLIRGen().emitMove(op.getQuotient()); 565 } 566 567 @Override 568 public Variable emitURem(Value a, Value b, LIRFrameState state) { 569 AMD64MulDivOp op; 570 switch ((AMD64Kind) a.getPlatformKind()) { 571 case DWORD: 572 op = emitDIV(DWORD, a, b, state); 573 break; 574 case QWORD: 575 op = emitDIV(QWORD, a, b, state); 576 break; 577 default: 578 throw GraalError.shouldNotReachHere(); 579 } 580 return getLIRGen().emitMove(op.getRemainder()); 581 } 582 583 @Override 584 public Variable emitAnd(Value a, Value b) { 585 LIRKind resultKind = LIRKind.combine(a, b); 586 switch ((AMD64Kind) a.getPlatformKind()) { 587 case DWORD: 588 return emitBinary(resultKind, AND, DWORD, true, a, b, false); 589 case QWORD: 590 return emitBinary(resultKind, AND, QWORD, true, a, b, false); 591 case SINGLE: 592 return emitBinary(resultKind, SSEOp.AND, PS, true, a, b); 593 case DOUBLE: 594 return emitBinary(resultKind, SSEOp.AND, PD, true, a, b); 595 
default: 596 throw GraalError.shouldNotReachHere(); 597 } 598 } 599 600 @Override 601 public Variable emitOr(Value a, Value b) { 602 LIRKind resultKind = LIRKind.combine(a, b); 603 switch ((AMD64Kind) a.getPlatformKind()) { 604 case DWORD: 605 return emitBinary(resultKind, OR, DWORD, true, a, b, false); 606 case QWORD: 607 return emitBinary(resultKind, OR, QWORD, true, a, b, false); 608 case SINGLE: 609 return emitBinary(resultKind, SSEOp.OR, PS, true, a, b); 610 case DOUBLE: 611 return emitBinary(resultKind, SSEOp.OR, PD, true, a, b); 612 default: 613 throw GraalError.shouldNotReachHere(); 614 } 615 } 616 617 @Override 618 public Variable emitXor(Value a, Value b) { 619 LIRKind resultKind = LIRKind.combine(a, b); 620 switch ((AMD64Kind) a.getPlatformKind()) { 621 case DWORD: 622 return emitBinary(resultKind, XOR, DWORD, true, a, b, false); 623 case QWORD: 624 return emitBinary(resultKind, XOR, QWORD, true, a, b, false); 625 case SINGLE: 626 return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b); 627 case DOUBLE: 628 return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b); 629 default: 630 throw GraalError.shouldNotReachHere(); 631 } 632 } 633 634 private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) { 635 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind())); 636 AllocatableValue input = asAllocatable(a); 637 if (isJavaConstant(b)) { 638 JavaConstant c = asJavaConstant(b); 639 if (c.asLong() == 1) { 640 getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input)); 641 } else { 642 /* 643 * c is implicitly masked to 5 or 6 bits by the CPU, so casting it to (int) is 644 * always correct, even without the NumUtil.is32bit() test. 
645 */ 646 getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (int) c.asLong())); 647 } 648 } else { 649 getLIRGen().emitMove(RCX_I, b); 650 getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I)); 651 } 652 return result; 653 } 654 655 @Override 656 public Variable emitShl(Value a, Value b) { 657 switch ((AMD64Kind) a.getPlatformKind()) { 658 case DWORD: 659 return emitShift(SHL, DWORD, a, b); 660 case QWORD: 661 return emitShift(SHL, QWORD, a, b); 662 default: 663 throw GraalError.shouldNotReachHere(); 664 } 665 } 666 667 @Override 668 public Variable emitShr(Value a, Value b) { 669 switch ((AMD64Kind) a.getPlatformKind()) { 670 case DWORD: 671 return emitShift(SAR, DWORD, a, b); 672 case QWORD: 673 return emitShift(SAR, QWORD, a, b); 674 default: 675 throw GraalError.shouldNotReachHere(); 676 } 677 } 678 679 @Override 680 public Variable emitUShr(Value a, Value b) { 681 switch ((AMD64Kind) a.getPlatformKind()) { 682 case DWORD: 683 return emitShift(SHR, DWORD, a, b); 684 case QWORD: 685 return emitShift(SHR, QWORD, a, b); 686 default: 687 throw GraalError.shouldNotReachHere(); 688 } 689 } 690 691 public Variable emitRol(Value a, Value b) { 692 switch ((AMD64Kind) a.getPlatformKind()) { 693 case DWORD: 694 return emitShift(ROL, DWORD, a, b); 695 case QWORD: 696 return emitShift(ROL, QWORD, a, b); 697 default: 698 throw GraalError.shouldNotReachHere(); 699 } 700 } 701 702 public Variable emitRor(Value a, Value b) { 703 switch ((AMD64Kind) a.getPlatformKind()) { 704 case DWORD: 705 return emitShift(ROR, DWORD, a, b); 706 case QWORD: 707 return emitShift(ROR, QWORD, a, b); 708 default: 709 throw GraalError.shouldNotReachHere(); 710 } 711 } 712 713 private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) { 714 Variable result = getLIRGen().newVariable(kind); 715 getLIRGen().append(new AMD64Unary.RMOp(op, size, result, asAllocatable(input))); 716 return result; 717 } 718 719 private 
AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) { 720 Variable result = getLIRGen().newVariable(kind); 721 getLIRGen().append(new AMD64Unary.MROp(op, size, result, asAllocatable(input))); 722 return result; 723 } 724 725 @Override 726 public Value emitReinterpret(LIRKind to, Value inputVal) { 727 ValueKind<?> from = inputVal.getValueKind(); 728 if (to.equals(from)) { 729 return inputVal; 730 } 731 732 AllocatableValue input = asAllocatable(inputVal); 733 /* 734 * Conversions between integer to floating point types require moves between CPU and FPU 735 * registers. 736 */ 737 AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind(); 738 switch ((AMD64Kind) to.getPlatformKind()) { 739 case DWORD: 740 switch (fromKind) { 741 case SINGLE: 742 return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input); 743 } 744 break; 745 case QWORD: 746 switch (fromKind) { 747 case DOUBLE: 748 return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input); 749 } 750 break; 751 case SINGLE: 752 switch (fromKind) { 753 case DWORD: 754 return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input); 755 } 756 break; 757 case DOUBLE: 758 switch (fromKind) { 759 case QWORD: 760 return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input); 761 } 762 break; 763 } 764 throw GraalError.shouldNotReachHere(); 765 } 766 767 @Override 768 public Value emitFloatConvert(FloatConvert op, Value input) { 769 switch (op) { 770 case D2F: 771 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input); 772 case D2I: 773 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input); 774 case D2L: 775 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input); 776 case F2D: 777 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input); 778 case F2I: 779 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), 
SSEOp.CVTTSS2SI, DWORD, input);
            case F2L:
                // float -> long: truncating SSE conversion into a 64-bit GPR.
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
            case I2D:
                // int -> double.
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
            case I2F:
                // int -> float.
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
            case L2D:
                // long -> double.
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
            case L2F:
                // long -> float.
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Narrows an integer value to {@code bits} bits. A 64-bit value narrowed to 32 bits or fewer is
     * copied into a DWORD register; values already 32-bit or smaller are returned unchanged (sub-int
     * values are internally represented as 32-bit values).
     */
    @Override
    public Value emitNarrow(Value inputVal, int bits) {
        if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
            // TODO make it possible to reinterpret Long as Int in LIR without move
            return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
        } else {
            return inputVal;
        }
    }

    /**
     * Sign-extends an integer from {@code fromBits} to {@code toBits} using MOVSX-family
     * instructions. Supports only the power-of-two widths 8, 16 and 32; other widths throw.
     */
    @Override
    public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (toBits > 32) {
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
                case 32:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
                case 32:
                    return inputVal;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
    }

    /**
     * Zero-extends an integer from {@code fromBits} to {@code toBits}. Uses MOVZX/MOV for the
     * common 8/16/32-bit widths and falls back to an AND with a bit mask for odd widths.
     */
    @Override
    public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (fromBits > 32) {
            // fromBits in (32, 64): no MOVZX form exists, mask the QWORD value directly.
            assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
            Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
            long mask = CodeUtil.mask(fromBits);
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
            return result;
        } else {
            LIRKind resultKind = LIRKind.combine(inputVal);
            if (toBits > 32) {
                resultKind = resultKind.changeType(AMD64Kind.QWORD);
            } else {
                resultKind = resultKind.changeType(AMD64Kind.DWORD);
            }

            /*
             * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
             * operations implicitly set the upper half of the register to 0, which is what we want
             * anyway. Compared to the QWORD operations, the encoding of the DWORD operations is
             * sometimes one byte shorter.
             */
            switch (fromBits) {
                case 8:
                    return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(resultKind, MOVZX, DWORD, inputVal);
                case 32:
                    return emitConvertOp(resultKind, MOV, DWORD, inputVal);
            }

            // odd bit count, fall back on manual masking
            Variable result = getLIRGen().newVariable(resultKind);
            JavaConstant mask;
            if (toBits > 32) {
                mask = JavaConstant.forLong(CodeUtil.mask(fromBits));
            } else {
                mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits));
            }
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), mask));
            return result;
        }
    }

    /**
     * Emits POPCNT (population count) for an integer value; the result is always a DWORD.
     */
    @Override
    public Variable emitBitCount(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits BSF (bit scan forward) to find the index of the lowest set bit.
     *
     * NOTE(review): unlike the sibling emitBitScanReverse/LZCNT/TZCNT methods below, this always
     * uses the QWORD form and has no integer-kind assertion — presumably relying on DWORD inputs
     * having zeroed upper register halves; confirm this asymmetry is intentional.
     */
    @Override
    public Variable emitBitScanForward(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, asAllocatable(value)));
        return result;
    }

    /**
     * Emits BSR (bit scan reverse) to find the index of the highest set bit, sized by the input's
     * platform kind.
     */
    @Override
    public Variable emitBitScanReverse(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits LZCNT (count leading zeros), sized by the input's platform kind.
     */
    @Override
    public Value emitCountLeadingZeros(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits TZCNT (count trailing zeros), sized by the input's platform kind.
     */
    @Override
    public Value emitCountTrailingZeros(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits the BMI1 ANDN instruction: {@code ~value1 & value2}.
     */
    @Override
    public Value emitLogicalAndNot(Value value1, Value value2) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value1, value2));

        if (value1.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.QWORD, result, asAllocatable(value1), asAllocatable(value2)));
        } else {
            getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.DWORD, result, asAllocatable(value1), asAllocatable(value2)));
        }
        return result;
    }

    /**
     * Emits the BMI1 BLSI instruction, isolating the lowest set bit of the input.
     */
    @Override
    public Value emitLowestSetIsolatedBit(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));

        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.DWORD, result, asAllocatable(value)));
        }

        return result;
    }

    /**
     * Emits the BMI1 BLSMSK instruction, producing a mask up to (and including) the lowest set bit.
     */
    @Override
    public Value emitGetMaskUpToLowestSetBit(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));

        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.DWORD, result, asAllocatable(value)));
        }

        return result;
    }

    /**
     * Emits the BMI1 BLSR instruction, clearing the lowest set bit of the input.
     */
    @Override
    public Value emitResetLowestSetBit(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));

        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.DWORD, result, asAllocatable(value)));
        }

        return result;
    }

    /**
     * Computes the absolute value of a float/double by ANDing away the sign bit with an
     * appropriately sized constant mask (16-byte aligned data).
     */
    @Override
    public Value emitMathAbs(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case SINGLE:
                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits the SSE SQRTSS/SQRTSD instruction for float/double inputs.
     */
    @Override
    public Value emitMathSqrt(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case SINGLE:
                getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, asAllocatable(input)));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, asAllocatable(input)));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits log/log10. Tries the {@code maths} helper first; if it declines (returns null), falls
     * back to the stub-based math intrinsic, which needs a QWORD spill slot for scratch space.
     */
    @Override
    public Value emitMathLog(Value input, boolean base10) {
        LIRGenerator gen = getLIRGen();
        Variable result = maths.emitLog(gen, input, base10);
        if (result == null) {
            result = gen.newVariable(LIRKind.combine(input));
            AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
            gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), base10 ? LOG10 : LOG, result, asAllocatable(input), stackSlot));
        }
        return result;
    }

    /**
     * Emits cos via the {@code maths} helper, falling back to the math intrinsic stub.
     */
    @Override
    public Value emitMathCos(Value input) {
        LIRGenerator gen = getLIRGen();
        Variable result = maths.emitCos(gen, input);
        if (result == null) {
            result = gen.newVariable(LIRKind.combine(input));
            AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
            gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), COS, result, asAllocatable(input), stackSlot));
        }
        return result;
    }

    /**
     * Emits sin via the {@code maths} helper, falling back to the math intrinsic stub.
     */
    @Override
    public Value emitMathSin(Value input) {
        LIRGenerator gen = getLIRGen();
        Variable result = maths.emitSin(gen, input);
        if (result == null) {
            result = gen.newVariable(LIRKind.combine(input));
            AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
            gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), SIN, result, asAllocatable(input), stackSlot));
        }
        return result;
    }

    /**
     * Emits tan via the {@code maths} helper, falling back to the math intrinsic stub.
     */
    @Override
    public Value emitMathTan(Value input) {
        LIRGenerator gen = getLIRGen();
        Variable result = maths.emitTan(gen, input);
        if (result == null) {
            result = gen.newVariable(LIRKind.combine(input));
            AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
            gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), TAN, result, asAllocatable(input), stackSlot));
        }
        return result;
    }

    /**
     * Emits exp via the math intrinsic stub (no {@code maths} fast path here).
     */
    @Override
    public Value emitMathExp(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), EXP, result, asAllocatable(input), stackSlot));
        return result;
    }

    /**
     * Emits pow via the binary math intrinsic stub; the result kind is derived from the base.
     */
    @Override
    public Value emitMathPow(Value input1, Value input2) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input1));
        getLIRGen().append(new AMD64MathIntrinsicBinaryOp(getAMD64LIRGen(), POW, result, asAllocatable(input1), asAllocatable(input2)));
        return result;
    }

    /** Convenience accessor: the LIR generator downcast to its AMD64 subtype. */
    protected AMD64LIRGenerator getAMD64LIRGen() {
        return (AMD64LIRGenerator) getLIRGen();
    }

    /**
     * Emits a memory load of the given kind. Sub-int integer loads are sign-extended to DWORD;
     * floating-point loads use MOVSS/MOVSD. {@code state} attaches the implicit-exception state.
     */
    @Override
    public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) {
        AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address);
        Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind));
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state));
                break;
            case SINGLE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Tries to store a constant directly to memory with a MOV-immediate. Handles null pointers,
     * patchable 32-bit VM constants, and primitives whose bit pattern fits in a 32-bit immediate;
     * anything else falls back to materializing the constant in a register and storing that.
     */
    protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) {
        Constant c = value.getConstant();
        if (JavaConstant.isNull(c)) {
            assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD;
            OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD;
            getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state));
            return;
        } else if (c instanceof VMConstant) {
            // only 32-bit constants can be patched
            if (kind == AMD64Kind.DWORD) {
                if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) {
                    // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant
                    assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object;
                    getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state));
                    return;
                }
            }
        } else {
            JavaConstant jc = (JavaConstant) c;
            assert jc.getJavaKind().isPrimitive();

            // op defaults to MOV; only the byte store needs the dedicated MOVB opcode.
            AMD64MIOp op = AMD64MIOp.MOV;
            OperandSize size;
            long imm;

            switch (kind) {
                case BYTE:
                    op = AMD64MIOp.MOVB;
                    size = BYTE;
                    imm = jc.asInt();
                    break;
                case WORD:
                    size = WORD;
                    imm = jc.asInt();
                    break;
                case DWORD:
                    size = DWORD;
                    imm = jc.asInt();
                    break;
                case QWORD:
                    size = QWORD;
                    imm = jc.asLong();
                    break;
                case SINGLE:
                    // store the raw float bits as a 32-bit integer immediate
                    size = DWORD;
                    imm = Float.floatToRawIntBits(jc.asFloat());
                    break;
                case DOUBLE:
                    // store the raw double bits as a 64-bit integer immediate
                    size = QWORD;
                    imm = Double.doubleToRawLongBits(jc.asDouble());
                    break;
                default:
                    throw GraalError.shouldNotReachHere("unexpected kind " + kind);
            }

            // MOV-to-memory immediates are at most 32 bits (sign-extended for QWORD).
            if (NumUtil.isInt(imm)) {
                getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state));
                return;
            }
        }

        // fallback: load, then store
        emitStore(kind, address, asAllocatable(value), state);
    }

    /**
     * Stores a register value to memory, selecting the MR (register-to-memory) move opcode and
     * operand size by kind.
     */
    protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) {
        switch (kind) {
            case BYTE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state));
                break;
            case SINGLE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Public store entry point: dispatches to the constant-store fast path when the input is a
     * constant, otherwise to the register store.
     */
    @Override
    public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) {
        AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address);
        AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind();
        if (isConstantValue(input)) {
            emitStoreConst(kind, storeAddress, asConstantValue(input), state);
        } else {
            emitStore(kind, storeAddress, asAllocatable(input), state);
        }
    }

    /**
     * Whether a null-pointer comparison must use the dedicated null register instead of an
     * immediate zero. Applies only to uncompressed null pointers when a null register is configured.
     */
    private boolean mustReplaceNullWithNullRegister(Constant nullConstant) {
        /* Uncompressed null pointers only */
        return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant);
    }

    /**
     * Emits a compare that sets the flags register. Floating-point kinds use UCOMIS and return
     * early; integer kinds pick an operand size, then try constant-operand encodings (TEST against
     * zero, patched VM constants, 32-bit immediates) before falling back to a register compare.
     */
    @Override
    public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = BYTE;
                break;
            case WORD:
                size = WORD;
                break;
            case DWORD:
                size = DWORD;
                break;
            case QWORD:
                size = QWORD;
                break;
            case SINGLE:
                getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, asAllocatable(right)));
                return;
            case DOUBLE:
                getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, asAllocatable(right)));
                return;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(right)) {
            Constant c = LIRValueUtil.asConstant(right);
            if (JavaConstant.isNull(c)) {
                if (mustReplaceNullWithNullRegister(c)) {
                    getLIRGen().append(new AMD64BinaryConsumer.Op(AMD64RMOp.CMP, size, left, nullRegisterValue));
                } else {
                    // comparing against null: TEST left,left is shorter than CMP left,0
                    getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left));
                }
                return;
            } else if (c instanceof VMConstant) {
                VMConstant vc = (VMConstant) c;
                if (size == DWORD && !GeneratePIC.getValue(getOptions())) {
                    // 32-bit VM constants can be patched directly into the instruction
                    getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc));
                } else {
                    getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc));
                }
                return;
            } else if (c instanceof JavaConstant) {
                JavaConstant jc = (JavaConstant) c;
                if (jc.isDefaultForKind()) {
                    // compare against 0: use TEST (TESTB for byte-sized operands)
                    AMD64RMOp op = size == BYTE ? TESTB : TEST;
                    getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left));
                    return;
                } else if (NumUtil.is32bit(jc.asLong())) {
                    getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong()));
                    return;
                }
            }
        }

        // fallback: load, then compare
        getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right)));
    }

    /**
     * Emits SSE4.1 ROUNDSS/ROUNDSD with the rounding mode encoded as the immediate operand.
     */
    @Override
    public Value emitRound(Value value, RoundingMode mode) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));
        assert ((AMD64Kind) value.getPlatformKind()).isXMM();
        if (value.getPlatformKind() == AMD64Kind.SINGLE) {
            getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding));
        } else {
            getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
        }
        return result;
    }
}