1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 25 26 package org.graalvm.compiler.core.amd64; 27 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 51 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD; 57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS; 58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD; 59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS; 60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD; 61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS; 62 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD; 63 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS; 64 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD; 65 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS; 66 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD; 67 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS; 68 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE; 69 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; 70 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD; 71 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS; 72 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD; 73 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD; 74 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS; 75 import static 
org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD; 76 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 77 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 78 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 79 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 80 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 81 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 82 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 83 84 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 85 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 89 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 90 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 91 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 92 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp; 93 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp; 94 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp; 95 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize; 96 import org.graalvm.compiler.asm.amd64.AVXKind; 97 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; 98 import org.graalvm.compiler.core.common.LIRKind; 99 import org.graalvm.compiler.core.common.NumUtil; 100 import org.graalvm.compiler.core.common.calc.FloatConvert; 101 import org.graalvm.compiler.debug.GraalError; 102 import org.graalvm.compiler.lir.ConstantValue; 103 import org.graalvm.compiler.lir.LIRFrameState; 104 import org.graalvm.compiler.lir.LIRValueUtil; 105 import org.graalvm.compiler.lir.Variable; 106 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 
107 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 108 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 109 import org.graalvm.compiler.lir.amd64.AMD64Binary; 110 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 111 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 112 import org.graalvm.compiler.lir.amd64.AMD64MathCosOp; 113 import org.graalvm.compiler.lir.amd64.AMD64MathExpOp; 114 import org.graalvm.compiler.lir.amd64.AMD64MathLog10Op; 115 import org.graalvm.compiler.lir.amd64.AMD64MathLogOp; 116 import org.graalvm.compiler.lir.amd64.AMD64MathPowOp; 117 import org.graalvm.compiler.lir.amd64.AMD64MathSinOp; 118 import org.graalvm.compiler.lir.amd64.AMD64MathTanOp; 119 import org.graalvm.compiler.lir.amd64.AMD64Move; 120 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp; 121 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp; 122 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp; 123 import org.graalvm.compiler.lir.amd64.AMD64Unary; 124 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary; 125 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp; 126 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary; 127 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator; 128 129 import jdk.vm.ci.amd64.AMD64; 130 import jdk.vm.ci.amd64.AMD64.CPUFeature; 131 import jdk.vm.ci.amd64.AMD64Kind; 132 import jdk.vm.ci.code.CodeUtil; 133 import jdk.vm.ci.code.Register; 134 import jdk.vm.ci.code.RegisterValue; 135 import jdk.vm.ci.code.TargetDescription; 136 import jdk.vm.ci.meta.AllocatableValue; 137 import jdk.vm.ci.meta.Constant; 138 import jdk.vm.ci.meta.JavaConstant; 139 import jdk.vm.ci.meta.JavaKind; 140 import jdk.vm.ci.meta.PlatformKind; 141 import jdk.vm.ci.meta.VMConstant; 142 import jdk.vm.ci.meta.Value; 143 import jdk.vm.ci.meta.ValueKind; 144 145 /** 146 * This class implements the AMD64 specific portion of the LIR generator. 
 */
public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool {

    // rcx viewed as a 32-bit value. Variable shift counts are moved here because AMD64
    // shift instructions take their count in CL (see emitShift).
    private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));

    public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue) {
        this.nullRegisterValue = nullRegisterValue;
    }

    // Set once in the constructor; may be null. NOTE(review): presumably the register
    // pre-loaded with the null reference value -- its consumers are not in this chunk, confirm.
    private final AllocatableValue nullRegisterValue;

    /**
     * Emits code negating {@code inputVal}. Integer kinds use the NEG instruction; float
     * kinds flip the IEEE-754 sign bit by XORing with a sign-bit mask constant (VXORPS/VXORPD
     * when AVX is available, SSE XOR otherwise).
     */
    @Override
    public Variable emitNegate(Value inputVal) {
        AllocatableValue input = asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
                break;
            case SINGLE:
                // 0x80000000 is exactly the float sign bit.
                JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000));
                if (isAvx) {
                    getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask))));
                } else {
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16));
                }
                break;
            case DOUBLE:
                // 0x8000000000000000L is exactly the double sign bit.
                JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L));
                if (isAvx) {
                    getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask))));
                } else {
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16));
                }
                break;
            default:
                throw GraalError.shouldNotReachHere(input.getPlatformKind().toString());
        }
        return result;
    }

    /**
     * Emits a bitwise complement (NOT instruction) of an integer value.
     */
    @Override
    public Variable emitNot(Value inputVal) {
        AllocatableValue input = asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Dispatches a two-operand arithmetic operation: if one operand is a Java constant (the
     * left one only when {@code commutative}), the constant form is emitted, otherwise the
     * register-register form.
     */
    private Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(a), asConstantValue(b), setFlags);
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(b), asConstantValue(a), setFlags);
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Same dispatch as above for a raw {@link AMD64RMOp} opcode (used for the SSE/float forms,
     * whose constants are emitted as data references rather than immediates).
     */
    private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, asAllocatable(a), asJavaConstant(b));
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, asAllocatable(b), asJavaConstant(a));
        } else {
            return emitBinaryVar(resultKind, op, size, commutative, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Emits {@code op} with a constant operand. Constants fitting in 32 bits use the immediate
     * form; when the caller does not need the flags, an ADD/SUB by +/-1 is strength-reduced to
     * INC/DEC via {@link #getMOp} (INC/DEC have a shorter encoding but set the flags
     * differently from ADD/SUB, hence the {@code !setFlags} guard). Constants wider than 32
     * bits fall back to the register-register form.
     */
    private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            Variable result = getLIRGen().newVariable(resultKind);
            int constant = (int) value;

            if (!setFlags) {
                AMD64MOp mop = getMOp(op, constant);
                if (mop != null) {
                    getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a));
                    return result;
                }
            }

            getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant));
            return result;
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, asAllocatable(b));
        }
    }

    /**
     * Maps ADD/SUB by +/-1 to the equivalent single-operand INC/DEC, or returns {@code null}
     * when no such strength reduction applies.
     */
    private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) {
        if (constant == 1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.INC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.DEC;
            }
        } else if (constant == -1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.DEC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.INC;
            }
        }
        return null;
    }

    // Constant form for RM opcodes: the constant is materialized as a data reference.
    private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) {
        Variable result = getLIRGen().newVariable(resultKind);
        getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b));
        return result;
    }

    // Register-register form; commutative ops get the CommutativeTwoOp variant so the
    // register allocator may swap the operands.
    private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) {
        Variable result = getLIRGen().newVariable(resultKind);
        if (commutative) {
            getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b));
        } else {
            getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b));
        }
        return result;
    }

    @Override
    protected boolean isNumericInteger(PlatformKind kind) {
        return ((AMD64Kind) kind).isInteger();
    }

    // Emits result = base + offset via LEA, which does not clobber the flags.
    private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) {
        Variable result = getLIRGen().newVariable(resultKind);
        AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset);
        getLIRGen().append(new AMD64Move.LeaOp(result, address, size));
        return result;
    }

    /**
     * Emits an addition. For integer adds of a 32-bit constant where no flags are needed, LEA
     * is preferred; +/-1 is excluded so that the INC/DEC reduction in
     * {@link #emitBinaryConst} can apply instead. Float adds use VADDSS/VADDSD on AVX,
     * SSE ADD otherwise.
     */
    @Override
    public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                if (isJavaConstant(b) && !setFlags) {
                    long displacement = asJavaConstant(b).asLong();
                    if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
                        return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD);
                    }
                }
                return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags);
            case QWORD:
                if (isJavaConstant(b) && !setFlags) {
                    long displacement = asJavaConstant(b).asLong();
                    if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
                        return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD);
                    }
                }
                return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VADDSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VADDSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a subtraction (not commutative). Float subtracts use VSUBSS/VSUBSD on AVX,
     * SSE SUB otherwise.
     */
    @Override
    public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
            case QWORD:
                return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VSUBSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VSUBSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a signed multiply by a constant: three-operand IMUL with an immediate when the
     * constant fits in 32 bits (sign-extended byte form when it fits in 8), register form
     * otherwise.
     */
    private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            int imm = (int) value;
            AMD64RMIOp op;
            if (NumUtil.isByte(imm)) {
                op = AMD64RMIOp.IMUL_SX;
            } else {
                op = AMD64RMIOp.IMUL;
            }

            Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b));
            getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm));
            return ret;
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b));
        }
    }

    // IMUL is commutative, so a constant on either side is routed to the immediate form.
    private Variable emitIMUL(OperandSize size, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitIMULConst(size, asAllocatable(a), asConstantValue(b));
        } else if (isJavaConstant(a)) {
            return emitIMULConst(size, asAllocatable(b), asConstantValue(a));
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Emits a multiplication. Integer kinds use IMUL; float kinds use VMULSS/VMULSD on AVX,
     * SSE MUL otherwise. Note that {@code setFlags} is not consulted on this path.
     */
    @Override
    public Variable emitMul(Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitIMUL(DWORD, a, b);
            case QWORD:
                return emitIMUL(QWORD, a, b);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VMULSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VMULSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Copies v into the given fixed register (needed for instructions with implicit operands).
    private RegisterValue moveToReg(Register reg, Value v) {
        RegisterValue ret = reg.asValue(v.getValueKind());
        getLIRGen().emitMove(ret, v);
        return ret;
    }

    // Full-width multiply with one operand forced into rax; returns the high half of the
    // product (see AMD64MulDivOp.getHighResult).
    private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
        AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b)));
        return getLIRGen().emitMove(mulHigh.getHighResult());
    }

    /**
     * Returns the high half of the signed product of {@code a} and {@code b} (IMUL).
     */
    @Override
    public Value emitMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Returns the high half of the unsigned product of {@code a} and {@code b} (MUL).
     */
    @Override
    public Value emitUMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.MUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.MUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits {@code op} with a memory operand; {@code state} describes the deopt state if the
     * memory access can trap.
     */
    public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a));
        getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state));
        return result;
    }

    // Converting load: applies op while loading from address, producing a value of kind.
    protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.value(kind));
        getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state));
        return result;
    }

    protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) {
        // Issue a zero extending load of the proper bit size and set the result to
        // the proper kind. A plain DWORD MOV suffices for the 32->64 case because 32-bit
        // moves implicitly zero the upper half of the destination register.
        Variable result = getLIRGen().newVariable(LIRKind.value(resultBits <= 32 ? AMD64Kind.DWORD : AMD64Kind.QWORD));
        switch (memoryKind) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    // Signed divide: sign-extends the dividend (rax) into the high register, then IDIV.
    // The returned op exposes both quotient and remainder.
    private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a)));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state));
    }

    // Unsigned divide: zeroes rdx (the high half of the dividend), then DIV.
    private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        RegisterValue rax = moveToReg(AMD64.rax, a);
        RegisterValue rdx = AMD64.rdx.asValue(kind);
        getLIRGen().append(new AMD64ClearRegisterOp(size, rdx));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state));
    }

    /**
     * Signed division producing both results from one IDIV: {@code [quotient, remainder]}.
     */
    public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitIDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitIDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /**
     * Unsigned division producing both results from one DIV: {@code [quotient, remainder]}.
     */
    public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /**
     * Emits a (signed) division. Integer kinds use IDIV and keep the quotient; float kinds
     * use VDIVSS/VDIVSD on AVX, SSE DIV otherwise.
     */
    @Override
    public Value emitDiv(Value a, Value b, LIRFrameState state) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getQuotient());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getQuotient());
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VDIVSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VDIVSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a remainder. Integer kinds use IDIV and keep the remainder; float kinds go
     * through the FREM/DREM stub ops (FPDivRemOp).
     */
    @Override
    public Value emitRem(Value a, Value b, LIRFrameState state) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getRemainder());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getRemainder());
            case SINGLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            case DOUBLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Unsigned integer division (quotient of DIV).
     */
    @Override
    public Variable emitUDiv(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getQuotient());
    }

    /**
     * Unsigned integer remainder (remainder of DIV).
     */
    @Override
    public Variable emitURem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getRemainder());
    }

    /**
     * Bitwise AND; float kinds use the packed SSE AND (PS/PD) on the raw bits.
     */
    @Override
    public Variable emitAnd(Value a, Value b) {
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, AND, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, AND, QWORD, true, a, b, false);
            case SINGLE:
                return emitBinary(resultKind, SSEOp.AND, PS, true, a, b);
            case DOUBLE:
                return emitBinary(resultKind, SSEOp.AND, PD, true, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Bitwise OR; float kinds use VORPS/VORPD on AVX, packed SSE OR otherwise.
     */
    @Override
    public Variable emitOr(Value a, Value b) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, OR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, OR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VORPS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VORPD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Bitwise XOR; float kinds use VXORPS/VXORPD on AVX, packed SSE XOR otherwise.
     */
    @Override
    public Variable emitXor(Value a, Value b) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VXORPS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VXORPD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a shift/rotate. A constant count of 1 uses the short one-operand encoding; other
     * constants use the immediate form; a variable count is moved into rcx because AMD64
     * shift-by-register instructions read the count from CL.
     */
    private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind()));
        AllocatableValue input = asAllocatable(a);
        if (isJavaConstant(b)) {
            JavaConstant c = asJavaConstant(b);
            if (c.asLong() == 1) {
                getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input));
            } else {
                /*
                 * c needs to be masked here, because shifts with immediate expect a byte.
                 */
                getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (byte) c.asLong()));
            }
        } else {
            getLIRGen().emitMove(RCX_I, b);
            getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I));
        }
        return result;
    }

    /**
     * Left shift (SHL).
     */
    @Override
    public Variable emitShl(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHL, DWORD, a, b);
            case QWORD:
                return emitShift(SHL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Arithmetic (sign-preserving) right shift -- SAR, matching Java's {@code >>}.
     */
    @Override
    public Variable emitShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SAR, DWORD, a, b);
            case QWORD:
                return emitShift(SAR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Logical (zero-filling) right shift -- SHR, matching Java's {@code >>>}.
     */
    @Override
    public Variable emitUShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHR, DWORD, a, b);
            case QWORD:
                return emitShift(SHR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Rotate left (ROL).
     */
    public Variable emitRol(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROL, DWORD, a, b);
            case QWORD:
                return emitShift(ROL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Rotate right (ROR).
     */
    @Override
    public Variable emitRor(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROR, DWORD, a, b);
            case QWORD:
                return emitShift(ROR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Applies a converting RM opcode to input, producing a fresh variable of the given kind.
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.RMOp(op, size, result, asAllocatable(input)));
        return result;
    }

    // MR-opcode counterpart of emitConvertOp above (used for XMM -> GPR moves in emitReinterpret).
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.MROp(op, size, result, asAllocatable(input)));
        return result;
    }

    /**
     * Reinterprets the bits of {@code inputVal} as the kind {@code to} without changing them.
     * A same-kind reinterpret is the identity; the supported cross-register-file cases are
     * SINGLE<->DWORD (MOVD) and DOUBLE<->QWORD (MOVQ).
     */
    @Override
    public Value emitReinterpret(LIRKind to, Value inputVal) {
        ValueKind<?> from = inputVal.getValueKind();
        if (to.equals(from)) {
            return inputVal;
        }

        AllocatableValue input = asAllocatable(inputVal);
        /*
         * Conversions between integer to floating point types require moves between CPU and FPU
         * registers.
         */
        AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind();
        switch ((AMD64Kind) to.getPlatformKind()) {
            case DWORD:
                switch (fromKind) {
                    case SINGLE:
                        return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input);
                }
                break;
            case QWORD:
                switch (fromKind) {
                    case DOUBLE:
                        return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input);
                }
                break;
            case SINGLE:
                switch (fromKind) {
                    case DWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input);
                }
                break;
            case DOUBLE:
                switch (fromKind) {
                    case QWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input);
                }
                break;
        }
        throw GraalError.shouldNotReachHere();
    }

    /**
     * Emits a value-converting float/int conversion using the SSE scalar convert
     * instructions (CVTSD2SS, CVTTSD2SI, CVTSI2SD, ...). The truncating (CVTT*) forms are
     * used for float-to-int.
     */
    @Override
    public Value emitFloatConvert(FloatConvert op, Value input) {
        switch (op) {
            case D2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input);
            case D2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input);
            case D2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input);
            case F2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input);
            case F2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSS2SI, DWORD, input);
            case F2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
            case I2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
            case I2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
            case L2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
            case L2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Narrows a value to {@code bits}. Only QWORD -> (<= 32 bit) needs code (a 32-bit MOV);
     * everything else is a no-op because sub-DWORD values are held in 32-bit registers.
     */
    @Override
    public Value emitNarrow(Value inputVal, int bits) {
        if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
            // TODO make it possible to reinterpret Long as Int in LIR without move
            return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
        } else {
            return inputVal;
        }
    }

    /**
     * Sign-extends from {@code fromBits} to {@code toBits} using MOVSX/MOVSXB/MOVSXD. Extends
     * to either a DWORD or QWORD result; 32 -> 32 is the identity.
     */
    @Override
    public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (toBits > 32) {
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
                case 32:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
                case 32:
                    return inputVal;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
    }

    /**
     * Zero-extends from {@code fromBits} to {@code toBits}. Sources wider than 32 bits are
     * masked with an AND; narrower sources fall through to the DWORD-operation path below.
     */
    @Override
    public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (fromBits > 32) {
            assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
            Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
            long mask = CodeUtil.mask(fromBits);
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
            return result;
        } else {
            LIRKind resultKind = LIRKind.combine(inputVal);
            if (toBits > 32) {
                resultKind = resultKind.changeType(AMD64Kind.QWORD);
            } else {
                resultKind = resultKind.changeType(AMD64Kind.DWORD);
            }

            /*
             * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
             * operations implicitly set the upper half of the register to 0, which is what we want
             * anyway. Compared to the QWORD operations, the encoding of the DWORD operations is
             * sometimes one byte shorter.
910 */ 911 switch (fromBits) { 912 case 8: 913 return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal); 914 case 16: 915 return emitConvertOp(resultKind, MOVZX, DWORD, inputVal); 916 case 32: 917 return emitConvertOp(resultKind, MOV, DWORD, inputVal); 918 } 919 920 // odd bit count, fall back on manual masking 921 Variable result = getLIRGen().newVariable(resultKind); 922 JavaConstant mask; 923 if (toBits > 32) { 924 mask = JavaConstant.forLong(CodeUtil.mask(fromBits)); 925 } else { 926 mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits)); 927 } 928 getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), mask)); 929 return result; 930 } 931 } 932 933 @Override 934 public Variable emitBitCount(Value value) { 935 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 936 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 937 if (value.getPlatformKind() == AMD64Kind.QWORD) { 938 getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, asAllocatable(value))); 939 } else { 940 getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, asAllocatable(value))); 941 } 942 return result; 943 } 944 945 @Override 946 public Variable emitBitScanForward(Value value) { 947 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 948 getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, asAllocatable(value))); 949 return result; 950 } 951 952 @Override 953 public Variable emitBitScanReverse(Value value) { 954 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 955 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 956 if (value.getPlatformKind() == AMD64Kind.QWORD) { 957 getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, asAllocatable(value))); 958 } else { 959 getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, asAllocatable(value))); 960 } 961 return result; 
962 } 963 964 @Override 965 public Value emitCountLeadingZeros(Value value) { 966 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 967 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 968 if (value.getPlatformKind() == AMD64Kind.QWORD) { 969 getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, asAllocatable(value))); 970 } else { 971 getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, asAllocatable(value))); 972 } 973 return result; 974 } 975 976 @Override 977 public Value emitCountTrailingZeros(Value value) { 978 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 979 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 980 if (value.getPlatformKind() == AMD64Kind.QWORD) { 981 getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, asAllocatable(value))); 982 } else { 983 getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, asAllocatable(value))); 984 } 985 return result; 986 } 987 988 @Override 989 public Value emitLogicalAndNot(Value value1, Value value2) { 990 Variable result = getLIRGen().newVariable(LIRKind.combine(value1, value2)); 991 992 if (value1.getPlatformKind() == AMD64Kind.QWORD) { 993 getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.QWORD, result, asAllocatable(value1), asAllocatable(value2))); 994 } else { 995 getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.DWORD, result, asAllocatable(value1), asAllocatable(value2))); 996 } 997 return result; 998 } 999 1000 @Override 1001 public Value emitLowestSetIsolatedBit(Value value) { 1002 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1003 1004 if (value.getPlatformKind() == AMD64Kind.QWORD) { 1005 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.QWORD, result, asAllocatable(value))); 1006 } else { 1007 getLIRGen().append(new 
AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.DWORD, result, asAllocatable(value))); 1008 } 1009 1010 return result; 1011 } 1012 1013 @Override 1014 public Value emitGetMaskUpToLowestSetBit(Value value) { 1015 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1016 1017 if (value.getPlatformKind() == AMD64Kind.QWORD) { 1018 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.QWORD, result, asAllocatable(value))); 1019 } else { 1020 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.DWORD, result, asAllocatable(value))); 1021 } 1022 1023 return result; 1024 } 1025 1026 @Override 1027 public Value emitResetLowestSetBit(Value value) { 1028 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1029 1030 if (value.getPlatformKind() == AMD64Kind.QWORD) { 1031 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.QWORD, result, asAllocatable(value))); 1032 } else { 1033 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.DWORD, result, asAllocatable(value))); 1034 } 1035 1036 return result; 1037 } 1038 1039 @Override 1040 public Value emitMathAbs(Value input) { 1041 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 1042 switch ((AMD64Kind) input.getPlatformKind()) { 1043 case SINGLE: 1044 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16)); 1045 break; 1046 case DOUBLE: 1047 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16)); 1048 break; 1049 default: 1050 throw GraalError.shouldNotReachHere(); 1051 } 1052 return result; 1053 } 1054 1055 @Override 1056 public Value emitMathSqrt(Value input) { 1057 Variable result = 
getLIRGen().newVariable(LIRKind.combine(input)); 1058 switch ((AMD64Kind) input.getPlatformKind()) { 1059 case SINGLE: 1060 getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, asAllocatable(input))); 1061 break; 1062 case DOUBLE: 1063 getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, asAllocatable(input))); 1064 break; 1065 default: 1066 throw GraalError.shouldNotReachHere(); 1067 } 1068 return result; 1069 } 1070 1071 @Override 1072 public Value emitMathLog(Value input, boolean base10) { 1073 if (base10) { 1074 return new AMD64MathLog10Op().emitLIRWrapper(getLIRGen(), input); 1075 } else { 1076 return new AMD64MathLogOp().emitLIRWrapper(getLIRGen(), input); 1077 } 1078 } 1079 1080 @Override 1081 public Value emitMathCos(Value input) { 1082 return new AMD64MathCosOp().emitLIRWrapper(getLIRGen(), input); 1083 } 1084 1085 @Override 1086 public Value emitMathSin(Value input) { 1087 return new AMD64MathSinOp().emitLIRWrapper(getLIRGen(), input); 1088 } 1089 1090 @Override 1091 public Value emitMathTan(Value input) { 1092 return new AMD64MathTanOp().emitLIRWrapper(getLIRGen(), input); 1093 } 1094 1095 @Override 1096 public Value emitMathExp(Value input) { 1097 return new AMD64MathExpOp().emitLIRWrapper(getLIRGen(), input); 1098 } 1099 1100 @Override 1101 public Value emitMathPow(Value x, Value y) { 1102 return new AMD64MathPowOp().emitLIRWrapper(getLIRGen(), x, y); 1103 } 1104 1105 protected AMD64LIRGenerator getAMD64LIRGen() { 1106 return (AMD64LIRGenerator) getLIRGen(); 1107 } 1108 1109 @Override 1110 public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) { 1111 AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address); 1112 Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind)); 1113 switch ((AMD64Kind) kind.getPlatformKind()) { 1114 case BYTE: 1115 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state)); 1116 break; 1117 case WORD: 1118 
getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state)); 1119 break; 1120 case DWORD: 1121 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state)); 1122 break; 1123 case QWORD: 1124 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state)); 1125 break; 1126 case SINGLE: 1127 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state)); 1128 break; 1129 case DOUBLE: 1130 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state)); 1131 break; 1132 default: 1133 throw GraalError.shouldNotReachHere(); 1134 } 1135 return result; 1136 } 1137 1138 protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) { 1139 Constant c = value.getConstant(); 1140 if (JavaConstant.isNull(c)) { 1141 assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD; 1142 OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD; 1143 getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state)); 1144 return; 1145 } else if (c instanceof VMConstant) { 1146 // only 32-bit constants can be patched 1147 if (kind == AMD64Kind.DWORD) { 1148 if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) { 1149 // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant 1150 assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object; 1151 getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state)); 1152 return; 1153 } 1154 } 1155 } else { 1156 JavaConstant jc = (JavaConstant) c; 1157 assert jc.getJavaKind().isPrimitive(); 1158 1159 AMD64MIOp op = AMD64MIOp.MOV; 1160 OperandSize size; 1161 long imm; 1162 1163 switch (kind) { 1164 case BYTE: 1165 op = AMD64MIOp.MOVB; 1166 size = BYTE; 1167 imm = jc.asInt(); 1168 break; 1169 case WORD: 1170 size = WORD; 1171 imm = jc.asInt(); 1172 
break; 1173 case DWORD: 1174 size = DWORD; 1175 imm = jc.asInt(); 1176 break; 1177 case QWORD: 1178 size = QWORD; 1179 imm = jc.asLong(); 1180 break; 1181 case SINGLE: 1182 size = DWORD; 1183 imm = Float.floatToRawIntBits(jc.asFloat()); 1184 break; 1185 case DOUBLE: 1186 size = QWORD; 1187 imm = Double.doubleToRawLongBits(jc.asDouble()); 1188 break; 1189 default: 1190 throw GraalError.shouldNotReachHere("unexpected kind " + kind); 1191 } 1192 1193 if (NumUtil.isInt(imm)) { 1194 getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state)); 1195 return; 1196 } 1197 } 1198 1199 // fallback: load, then store 1200 emitStore(kind, address, asAllocatable(value), state); 1201 } 1202 1203 protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) { 1204 switch (kind) { 1205 case BYTE: 1206 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state)); 1207 break; 1208 case WORD: 1209 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state)); 1210 break; 1211 case DWORD: 1212 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state)); 1213 break; 1214 case QWORD: 1215 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state)); 1216 break; 1217 case SINGLE: 1218 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state)); 1219 break; 1220 case DOUBLE: 1221 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state)); 1222 break; 1223 default: 1224 throw GraalError.shouldNotReachHere(); 1225 } 1226 } 1227 1228 @Override 1229 public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) { 1230 AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address); 1231 AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind(); 
1232 if (isConstantValue(input)) { 1233 emitStoreConst(kind, storeAddress, asConstantValue(input), state); 1234 } else { 1235 emitStore(kind, storeAddress, asAllocatable(input), state); 1236 } 1237 } 1238 1239 private boolean mustReplaceNullWithNullRegister(Constant nullConstant) { 1240 /* Uncompressed null pointers only */ 1241 return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant); 1242 } 1243 1244 @Override 1245 public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) { 1246 OperandSize size; 1247 switch (cmpKind) { 1248 case BYTE: 1249 size = BYTE; 1250 break; 1251 case WORD: 1252 size = WORD; 1253 break; 1254 case DWORD: 1255 size = DWORD; 1256 break; 1257 case QWORD: 1258 size = QWORD; 1259 break; 1260 case SINGLE: 1261 getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, asAllocatable(right))); 1262 return; 1263 case DOUBLE: 1264 getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, asAllocatable(right))); 1265 return; 1266 default: 1267 throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind); 1268 } 1269 1270 if (isConstantValue(right)) { 1271 Constant c = LIRValueUtil.asConstant(right); 1272 if (JavaConstant.isNull(c)) { 1273 if (mustReplaceNullWithNullRegister(c)) { 1274 getLIRGen().append(new AMD64BinaryConsumer.Op(AMD64RMOp.CMP, size, left, nullRegisterValue)); 1275 } else { 1276 getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left)); 1277 } 1278 return; 1279 } else if (c instanceof VMConstant) { 1280 VMConstant vc = (VMConstant) c; 1281 if (size == DWORD && !GeneratePIC.getValue(getOptions())) { 1282 getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc)); 1283 } else { 1284 getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc)); 1285 } 1286 return; 1287 } else if (c instanceof JavaConstant) { 1288 JavaConstant jc = (JavaConstant) c; 1289 if (jc.isDefaultForKind()) { 1290 
AMD64RMOp op = size == BYTE ? TESTB : TEST; 1291 getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left)); 1292 return; 1293 } else if (NumUtil.is32bit(jc.asLong())) { 1294 getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong())); 1295 return; 1296 } 1297 } 1298 } 1299 1300 // fallback: load, then compare 1301 getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right))); 1302 } 1303 1304 @Override 1305 public Value emitRound(Value value, RoundingMode mode) { 1306 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1307 assert ((AMD64Kind) value.getPlatformKind()).isXMM(); 1308 if (value.getPlatformKind() == AMD64Kind.SINGLE) { 1309 getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding)); 1310 } else { 1311 getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding)); 1312 } 1313 return result; 1314 } 1315 1316 private boolean supportAVX() { 1317 TargetDescription target = getLIRGen().target(); 1318 return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX); 1319 } 1320 1321 private static AVXSize getRegisterSize(Value a) { 1322 AMD64Kind kind = (AMD64Kind) a.getPlatformKind(); 1323 if (kind.isXMM()) { 1324 return AVXKind.getRegisterSize(kind); 1325 } else { 1326 return AVXSize.XMM; 1327 } 1328 } 1329 1330 private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) { 1331 Variable result = getLIRGen().newVariable(resultKind); 1332 getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b))); 1333 return result; 1334 } 1335 1336 }