1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 25 26 package org.graalvm.compiler.core.amd64; 27 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 51 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD; 57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS; 58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD; 59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS; 60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VFMADD231SD; 61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VFMADD231SS; 62 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD; 63 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS; 64 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD; 65 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS; 66 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD; 67 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS; 68 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD; 69 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS; 70 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE; 71 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; 72 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD; 73 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS; 74 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD; 75 import static 
org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD; 76 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS; 77 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD; 78 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 79 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 80 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 81 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 82 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 83 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 84 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 85 86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 89 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 90 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 91 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 92 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 93 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 94 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp; 95 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp; 96 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp; 97 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize; 98 import org.graalvm.compiler.asm.amd64.AVXKind; 99 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; 100 import org.graalvm.compiler.core.common.LIRKind; 101 import org.graalvm.compiler.core.common.NumUtil; 102 import org.graalvm.compiler.core.common.calc.FloatConvert; 103 import org.graalvm.compiler.debug.GraalError; 104 import org.graalvm.compiler.lir.ConstantValue; 105 import 
org.graalvm.compiler.lir.LIRFrameState; 106 import org.graalvm.compiler.lir.LIRValueUtil; 107 import org.graalvm.compiler.lir.Variable; 108 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 109 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 110 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 111 import org.graalvm.compiler.lir.amd64.AMD64Binary; 112 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 113 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 114 import org.graalvm.compiler.lir.amd64.AMD64MathCosOp; 115 import org.graalvm.compiler.lir.amd64.AMD64MathExpOp; 116 import org.graalvm.compiler.lir.amd64.AMD64MathLog10Op; 117 import org.graalvm.compiler.lir.amd64.AMD64MathLogOp; 118 import org.graalvm.compiler.lir.amd64.AMD64MathPowOp; 119 import org.graalvm.compiler.lir.amd64.AMD64MathSinOp; 120 import org.graalvm.compiler.lir.amd64.AMD64MathTanOp; 121 import org.graalvm.compiler.lir.amd64.AMD64Move; 122 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp; 123 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp; 124 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp; 125 import org.graalvm.compiler.lir.amd64.AMD64Ternary; 126 import org.graalvm.compiler.lir.amd64.AMD64Unary; 127 import org.graalvm.compiler.lir.amd64.AMD64ZeroMemoryOp; 128 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary; 129 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp; 130 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary; 131 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator; 132 133 import jdk.vm.ci.amd64.AMD64; 134 import jdk.vm.ci.amd64.AMD64.CPUFeature; 135 import jdk.vm.ci.amd64.AMD64Kind; 136 import jdk.vm.ci.code.CodeUtil; 137 import jdk.vm.ci.code.Register; 138 import jdk.vm.ci.code.RegisterValue; 139 import jdk.vm.ci.code.TargetDescription; 140 import jdk.vm.ci.meta.AllocatableValue; 141 import jdk.vm.ci.meta.Constant; 142 import 
jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

/**
 * This class implements the AMD64 specific portion of the LIR generator.
 */
public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool {

    // Fixed view of rcx used for variable shift amounts: AMD64 shift instructions take the
    // count in CL, so emitShift moves non-constant counts here first.
    private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));

    public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue) {
        this.nullRegisterValue = nullRegisterValue;
    }

    // NOTE(review): not referenced in the portion of the file visible here; presumably used by
    // oop-decompression code further down — confirm before removing.
    private final AllocatableValue nullRegisterValue;

    /**
     * Emits a negation of {@code inputVal}. Integers use the NEG instruction; floating point
     * values are negated by XORing the IEEE-754 sign bit with a constant mask, using the AVX
     * (VXORPS/VXORPD) or SSE (XORPS/XORPD) form depending on {@link #supportAVX()}.
     */
    @Override
    public Variable emitNegate(Value inputVal) {
        AllocatableValue input = asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
                break;
            case SINGLE:
                // 0x80000000 is the float sign-bit mask; XOR flips the sign.
                JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000));
                if (isAvx) {
                    getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask))));
                } else {
                    // Trailing 16 presumably requests 16-byte alignment for the embedded
                    // constant (XMM load) — confirm against AMD64Binary.DataTwoOp.
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16));
                }
                break;
            case DOUBLE:
                // 0x8000000000000000L is the double sign-bit mask.
                JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L));
                if (isAvx) {
                    getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask))));
                } else {
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16));
                }
                break;
            default:
                throw GraalError.shouldNotReachHere(input.getPlatformKind().toString());
        }
        return result;
    }

    /**
     * Emits a bitwise complement (NOT instruction) of an integer value.
     */
    @Override
    public Variable emitNot(Value inputVal) {
        AllocatableValue input = asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits a two-operand integer arithmetic operation, routing to the constant form when one
     * operand is a Java constant (either operand if {@code commutative}).
     *
     * @param setFlags true if a subsequent LIR instruction consumes the condition flags, which
     *            rules out flag-free replacements such as INC/DEC (see emitBinaryConst)
     */
    private Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(a), asConstantValue(b), setFlags);
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(b), asConstantValue(a), setFlags);
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Emits a two-operand operation for an opcode that has no immediate form; constants are
     * materialized as data-section operands via DataTwoOp.
     */
    private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, asAllocatable(a), asJavaConstant(b));
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, asAllocatable(b), asJavaConstant(a));
        } else {
            return emitBinaryVar(resultKind, op, size, commutative, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Emits {@code a op constant}. Constants fitting in 32 bits use the immediate encoding
     * (or INC/DEC when the constant is +/-1 and flags are not needed); larger constants fall
     * back to the register-register form.
     */
    private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            Variable result = getLIRGen().newVariable(resultKind);
            int constant = (int) value;

            if (!setFlags) {
                // INC/DEC do not update the carry flag, so they are only legal when the
                // caller does not consume flags.
                AMD64MOp mop = getMOp(op, constant);
                if (mop != null) {
                    getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a));
                    return result;
                }
            }

            getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant));
            return result;
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, asAllocatable(b));
        }
    }

    /**
     * Maps ADD/SUB of +/-1 to the shorter INC/DEC encodings; returns null when no such
     * replacement exists.
     */
    private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) {
        if (constant == 1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.INC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.DEC;
            }
        } else if (constant == -1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.DEC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.INC;
            }
        }
        return null;
    }

    // Constant operand loaded from the data section (no immediate form available for op).
    private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) {
        Variable result = getLIRGen().newVariable(resultKind);
        getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b));
        return result;
    }

    // Register-register form; CommutativeTwoOp lets the register allocator swap operands.
    private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) {
        Variable result = getLIRGen().newVariable(resultKind);
        if (commutative) {
            getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b));
        } else {
            getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b));
        }
        return result;
    }

    @Override
    protected boolean isNumericInteger(PlatformKind kind) {
        return ((AMD64Kind) kind).isInteger();
    }

    // Emits base+offset as a single LEA, avoiding a flag-clobbering ADD.
    private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) {
        Variable result = getLIRGen().newVariable(resultKind);
        AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset);
        getLIRGen().append(new AMD64Move.LeaOp(result, address, size));
        return result;
    }

    /**
     * Emits an addition. Integer additions of a 32-bit constant are folded into an LEA when
     * flags are not required, except for +/-1 which emitBinary turns into INC/DEC instead.
     */
    @Override
    public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                if (isJavaConstant(b) && !setFlags) {
                    long displacement = asJavaConstant(b).asLong();
                    if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
                        return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD);
                    }
                }
                return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags);
            case QWORD:
                if (isJavaConstant(b) && !setFlags) {
                    long displacement = asJavaConstant(b).asLong();
                    if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
                        return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD);
                    }
                }
                return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VADDSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VADDSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a subtraction (non-commutative, so operand order is preserved).
     */
    @Override
    public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
            case QWORD:
                return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VSUBSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VSUBSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Signed multiply by an immediate. Byte-sized immediates use the sign-extended IMUL_SX
     * encoding (shorter); constants wider than 32 bits fall back to register-register IMUL.
     */
    private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            int imm = (int) value;
            AMD64RMIOp op;
            if (NumUtil.isByte(imm)) {
                op = AMD64RMIOp.IMUL_SX;
            } else {
                op = AMD64RMIOp.IMUL;
            }

            Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b));
            getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm));
            return ret;
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b));
        }
    }

    // IMUL is commutative, so a constant on either side uses the immediate form.
    private Variable emitIMUL(OperandSize size, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitIMULConst(size, asAllocatable(a), asConstantValue(b));
        } else if (isJavaConstant(a)) {
            return emitIMULConst(size, asAllocatable(b), asConstantValue(a));
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Emits a multiplication. {@code setFlags} is ignored for the integer cases here since
     * IMUL is used unconditionally.
     */
    @Override
    public Variable emitMul(Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitIMUL(DWORD, a, b);
            case QWORD:
                return emitIMUL(QWORD, a, b);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VMULSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VMULSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Copies v into the given fixed register (needed for instructions with implicit operands
    // such as one-operand MUL/DIV, which use rax/rdx).
    private RegisterValue moveToReg(Register reg, Value v) {
        RegisterValue ret = reg.asValue(v.getValueKind());
        getLIRGen().emitMove(ret, v);
        return ret;
    }

    // One-operand MUL/IMUL produces a double-width product; the high half is returned.
    private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
        AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b)));
        return getLIRGen().emitMove(mulHigh.getHighResult());
    }

    /**
     * Returns the high half of the signed product of {@code a} and {@code b}.
     */
    @Override
    public Value emitMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Returns the high half of the unsigned product of {@code a} and {@code b}.
     */
    @Override
    public Value emitUMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.MUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.MUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits {@code a op [location]} with the memory operand folded into the instruction;
     * {@code state} describes the deopt state if the access faults.
     */
    public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a));
        getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state));
        return result;
    }

    // Conversion whose source operand is loaded directly from memory.
    protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.value(kind));
        getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state));
        return result;
    }

    protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) {
        // Issue a zero extending load of the proper bit size and set the result to
        // the proper kind.
        Variable result = getLIRGen().newVariable(LIRKind.value(resultBits <= 32 ? AMD64Kind.DWORD : AMD64Kind.QWORD));
        switch (memoryKind) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state));
                break;
            case DWORD:
                // A plain 32-bit MOV already zeroes the upper half of the register.
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits a signed divide: sign-extends rax into rdx:rax (via AMD64SignExtendOp), then IDIV
     * by {@code b}. Quotient ends up in rax, remainder in rdx (see AMD64MulDivOp accessors).
     */
    private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a)));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state));
    }

    /**
     * Emits an unsigned divide: zeroes rdx, then DIV of rdx:rax by {@code b}.
     */
    private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        RegisterValue rax = moveToReg(AMD64.rax, a);
        RegisterValue rdx = AMD64.rdx.asValue(kind);
        getLIRGen().append(new AMD64ClearRegisterOp(size, rdx));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state));
    }

    /**
     * Computes both quotient and remainder of a signed division with a single IDIV.
     *
     * @return {@code {quotient, remainder}}
     */
    public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitIDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitIDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }
    /**
     * Computes both quotient and remainder of an unsigned division with a single DIV.
     *
     * @return {@code {quotient, remainder}}
     */
    public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /**
     * Emits a signed division. Integer cases use IDIV (the {@code state} covers the implicit
     * division-by-zero trap); floating point uses scalar SSE/AVX divide.
     */
    @Override
    public Value emitDiv(Value a, Value b, LIRFrameState state) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getQuotient());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getQuotient());
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VDIVSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VDIVSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a signed remainder. Integer cases take the remainder half of IDIV; floating point
     * remainders are computed by the dedicated FPDivRemOp stub (FREM/DREM).
     */
    @Override
    public Value emitRem(Value a, Value b, LIRFrameState state) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getRemainder());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getRemainder());
            case SINGLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            case DOUBLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits an unsigned integer division (quotient only).
     */
    @Override
    public Variable emitUDiv(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getQuotient());
    }

    /**
     * Emits an unsigned integer remainder.
     */
    @Override
    public Variable emitURem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getRemainder());
    }

    /**
     * Emits a bitwise AND. Floating point kinds use the packed ANDPS/ANDPD forms (note: unlike
     * emitOr/emitXor below, this method has no AVX branch in the visible code).
     */
    @Override
    public Variable emitAnd(Value a, Value b) {
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, AND, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, AND, QWORD, true, a, b, false);
            case SINGLE:
                return emitBinary(resultKind, SSEOp.AND, PS, true, a, b);
            case DOUBLE:
                return emitBinary(resultKind, SSEOp.AND, PD, true, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a bitwise OR; floating point kinds use VORPS/VORPD (AVX) or ORPS/ORPD (SSE).
     */
    @Override
    public Variable emitOr(Value a, Value b) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, OR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, OR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VORPS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VORPD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a bitwise XOR; floating point kinds use VXORPS/VXORPD (AVX) or XORPS/XORPD (SSE).
     */
    @Override
    public Variable emitXor(Value a, Value b) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VXORPS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VXORPD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a shift/rotate of {@code a} by {@code b}. Constant counts of 1 use the one-form
     * encoding; other constants use the immediate form; variable counts are moved into rcx
     * (CL), as required by the hardware.
     */
    private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind()));
        AllocatableValue input = asAllocatable(a);
        if (isJavaConstant(b)) {
            JavaConstant c = asJavaConstant(b);
            if (c.asLong() == 1) {
                getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input));
            } else {
                /*
                 * c needs to be masked here, because shifts with immediate expect a byte.
                 */
                getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (byte) c.asLong()));
            }
        } else {
            getLIRGen().emitMove(RCX_I, b);
            getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I));
        }
        return result;
    }

    /** Shift left (SHL). */
    @Override
    public Variable emitShl(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHL, DWORD, a, b);
            case QWORD:
                return emitShift(SHL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Arithmetic (signed) shift right — SAR, matching Java's {@code >>}. */
    @Override
    public Variable emitShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SAR, DWORD, a, b);
            case QWORD:
                return emitShift(SAR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Logical (unsigned) shift right — SHR, matching Java's {@code >>>}. */
    @Override
    public Variable emitUShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHR, DWORD, a, b);
            case QWORD:
                return emitShift(SHR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Rotate left (ROL). */
    public Variable emitRol(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROL, DWORD, a, b);
            case QWORD:
                return emitShift(ROL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Rotate right (ROR). */
    @Override
    public Variable emitRor(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROR, DWORD, a, b);
            case QWORD:
                return emitShift(ROR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Single-input conversion whose result register class is determined by the RM opcode.
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.RMOp(op, size, result, asAllocatable(input)));
        return result;
    }
772 private AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) { 773 Variable result = getLIRGen().newVariable(kind); 774 getLIRGen().append(new AMD64Unary.MROp(op, size, result, asAllocatable(input))); 775 return result; 776 } 777 778 @Override 779 public Value emitReinterpret(LIRKind to, Value inputVal) { 780 ValueKind<?> from = inputVal.getValueKind(); 781 if (to.equals(from)) { 782 return inputVal; 783 } 784 785 AllocatableValue input = asAllocatable(inputVal); 786 /* 787 * Conversions between integer to floating point types require moves between CPU and FPU 788 * registers. 789 */ 790 AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind(); 791 switch ((AMD64Kind) to.getPlatformKind()) { 792 case DWORD: 793 switch (fromKind) { 794 case SINGLE: 795 return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input); 796 } 797 break; 798 case QWORD: 799 switch (fromKind) { 800 case DOUBLE: 801 return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input); 802 } 803 break; 804 case SINGLE: 805 switch (fromKind) { 806 case DWORD: 807 return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input); 808 } 809 break; 810 case DOUBLE: 811 switch (fromKind) { 812 case QWORD: 813 return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input); 814 } 815 break; 816 } 817 throw GraalError.shouldNotReachHere(); 818 } 819 820 @Override 821 public Value emitFloatConvert(FloatConvert op, Value input) { 822 switch (op) { 823 case D2F: 824 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input); 825 case D2I: 826 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input); 827 case D2L: 828 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input); 829 case F2D: 830 return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input); 831 case F2I: 832 return 
emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSS2SI, DWORD, input);
        case F2L:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
        case I2D:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
        case I2F:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
        case L2D:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
        case L2F:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
        default:
            throw GraalError.shouldNotReachHere();
    }
}

/**
 * Narrows {@code inputVal} to {@code bits}. Only a QWORD narrowed to 32 bits or
 * fewer needs an actual instruction (a DWORD-sized MOV); everything else is already
 * representable and is returned unchanged.
 */
@Override
public Value emitNarrow(Value inputVal, int bits) {
    if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
        // TODO make it possible to reinterpret Long as Int in LIR without move
        return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
    } else {
        return inputVal;
    }
}

/**
 * Sign-extends {@code inputVal} from {@code fromBits} to {@code toBits} using the
 * MOVSX family. Targets wider than 32 bits extend to a QWORD; otherwise the value is
 * extended to a DWORD. Only 8/16/32-bit sources are supported.
 */
@Override
public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
    assert fromBits <= toBits && toBits <= 64;
    if (fromBits == toBits) {
        return inputVal;
    } else if (toBits > 32) {
        // sign extend to 64 bits
        switch (fromBits) {
            case 8:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
            case 16:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
            case 32:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
            default:
                throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
        }
    } else {
        // sign extend to 32 bits (smaller values are internally
represented as 32 bit values)
        switch (fromBits) {
            case 8:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
            case 16:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
            case 32:
                // Already a full 32-bit value; nothing to extend.
                return inputVal;
            default:
                throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
        }
    }
}

/**
 * Zero-extends {@code inputVal} from {@code fromBits} to {@code toBits}. Sources
 * wider than 32 bits are masked with an AND; 8/16/32-bit sources use the MOVZX/MOV
 * forms; odd bit counts fall back to an explicit AND mask (see below).
 */
@Override
public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
    assert fromBits <= toBits && toBits <= 64;
    if (fromBits == toBits) {
        return inputVal;
    } else if (fromBits > 32) {
        // There is no MOVZX for >32-bit sources: mask the QWORD in place instead.
        assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
        Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
        long mask = CodeUtil.mask(fromBits);
        getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
        return result;
    } else {
        LIRKind resultKind = LIRKind.combine(inputVal);
        if (toBits > 32) {
            resultKind = resultKind.changeType(AMD64Kind.QWORD);
        } else {
            resultKind = resultKind.changeType(AMD64Kind.DWORD);
        }

        /*
         * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
         * operations implicitly set the upper half of the register to 0, which is what we want
         * anyway. Compared to the QWORD operations, the encoding of the DWORD operations is
         * sometimes one byte shorter.
         */
        switch (fromBits) {
            case 8:
                return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal);
            case 16:
                return emitConvertOp(resultKind, MOVZX, DWORD, inputVal);
            case 32:
                return emitConvertOp(resultKind, MOV, DWORD, inputVal);
        }

        // odd bit count, fall back on manual masking
        Variable result = getLIRGen().newVariable(resultKind);
        JavaConstant mask;
        if (toBits > 32) {
            mask = JavaConstant.forLong(CodeUtil.mask(fromBits));
        } else {
            mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits));
        }
        getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), mask));
        return result;
    }
}

/**
 * Emits POPCNT to count the set bits of an integer {@code value}; the result is
 * always a DWORD regardless of the input width.
 */
@Override
public Variable emitBitCount(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/**
 * Emits BSF to find the index of the lowest set bit.
 *
 * NOTE(review): unlike {@link #emitBitScanReverse} this always uses the QWORD form
 * and has no integer-kind assert — presumably scanning forward over the
 * zero-extended register yields the same index for narrower inputs; confirm this
 * asymmetry is intentional.
 */
@Override
public Variable emitBitScanForward(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, asAllocatable(value)));
    return result;
}

/**
 * Emits BSR to find the index of the highest set bit, sized to match the input.
 */
@Override
public Variable emitBitScanReverse(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/**
 * Emits a scalar fused multiply-add computing {@code a * b + c} (single rounding)
 * using VFMADD231SD/VFMADD231SS. All three operands must be XMM values of the same
 * platform kind (SINGLE or DOUBLE).
 */
@Override
public Variable emitFusedMultiplyAdd(Value a, Value b, Value c) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(a, b, c));
    assert ((AMD64Kind) a.getPlatformKind()).isXMM() && ((AMD64Kind) b.getPlatformKind()).isXMM() && ((AMD64Kind) c.getPlatformKind()).isXMM();
    assert a.getPlatformKind().equals(b.getPlatformKind());
    assert b.getPlatformKind().equals(c.getPlatformKind());

    if (a.getPlatformKind() == AMD64Kind.DOUBLE) {
        // Operand order (result, c, a, b) follows the 231 form, where the
        // destination register also supplies the addend.
        getLIRGen().append(new AMD64Ternary.ThreeOp(VFMADD231SD, AVXSize.XMM, result, asAllocatable(c), asAllocatable(a), asAllocatable(b)));
    } else {
        assert a.getPlatformKind() == AMD64Kind.SINGLE;
        getLIRGen().append(new AMD64Ternary.ThreeOp(VFMADD231SS, AVXSize.XMM, result, asAllocatable(c), asAllocatable(a), asAllocatable(b)));
    }
    return result;
}

/**
 * Emits LZCNT to count leading zero bits, sized to match the integer input.
 */
@Override
public Value emitCountLeadingZeros(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/**
 * Emits TZCNT to count trailing zero bits, sized to match the integer input.
 */
@Override
public Value emitCountTrailingZeros(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/**
 * Emits the BMI1 ANDN instruction for an and-not of the two integer inputs,
 * sized by the platform kind of {@code value1}.
 */
@Override
public Value emitLogicalAndNot(Value value1, Value value2) {
    Variable result =
getLIRGen().newVariable(LIRKind.combine(value1, value2));

    if (value1.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.QWORD, result, asAllocatable(value1), asAllocatable(value2)));
    } else {
        getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.DWORD, result, asAllocatable(value1), asAllocatable(value2)));
    }
    return result;
}

/**
 * Emits the BMI1 BLSI instruction to isolate the lowest set bit of {@code value},
 * sized by the input's platform kind.
 */
@Override
public Value emitLowestSetIsolatedBit(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));

    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.DWORD, result, asAllocatable(value)));
    }

    return result;
}

/**
 * Emits the BMI1 BLSMSK instruction, producing a mask covering all bits up to and
 * including the lowest set bit of {@code value}.
 */
@Override
public Value emitGetMaskUpToLowestSetBit(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));

    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.DWORD, result, asAllocatable(value)));
    }

    return result;
}

/**
 * Emits the BMI1 BLSR instruction to clear the lowest set bit of {@code value}.
 */
@Override
public Value emitResetLowestSetBit(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));

    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.DWORD, result,
asAllocatable(value)));
    }

    return result;
}

/**
 * Emits a floating-point absolute value by ANDing out the sign bit with a packed
 * constant (0x7FFFFFFF for SINGLE, 0x7FFF...F for DOUBLE).
 */
@Override
public Value emitMathAbs(Value input) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(input));
    switch ((AMD64Kind) input.getPlatformKind()) {
        case SINGLE:
            // Trailing 16 — presumably the required alignment of the embedded constant; confirm.
            getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
    return result;
}

/**
 * Emits the scalar SSE square-root instruction for SINGLE or DOUBLE input.
 */
@Override
public Value emitMathSqrt(Value input) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(input));
    switch ((AMD64Kind) input.getPlatformKind()) {
        case SINGLE:
            getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, asAllocatable(input)));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, asAllocatable(input)));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
    return result;
}

/**
 * Emits a logarithm via the stub-based math ops: log10 when {@code base10} is set,
 * natural log otherwise.
 */
@Override
public Value emitMathLog(Value input, boolean base10) {
    if (base10) {
        return new AMD64MathLog10Op().emitLIRWrapper(getLIRGen(), input);
    } else {
        return new AMD64MathLogOp().emitLIRWrapper(getLIRGen(), input);
    }
}

/** Emits cosine via the stub-based {@code AMD64MathCosOp}. */
@Override
public Value emitMathCos(Value input) {
    return new AMD64MathCosOp().emitLIRWrapper(getLIRGen(), input);
}

/** Emits sine via the stub-based {@code AMD64MathSinOp}. */
@Override
public Value emitMathSin(Value input) {
    return new AMD64MathSinOp().emitLIRWrapper(getLIRGen(), input);
}

/** Emits tangent via the stub-based {@code AMD64MathTanOp}. */
@Override
public Value emitMathTan(Value input) {
    return new AMD64MathTanOp().emitLIRWrapper(getLIRGen(), input);
}

/** Emits exp via the stub-based {@code AMD64MathExpOp}. */
@Override
public Value
emitMathExp(Value input) {
    return new AMD64MathExpOp().emitLIRWrapper(getLIRGen(), input);
}

/** Emits pow via the stub-based {@code AMD64MathPowOp}. */
@Override
public Value emitMathPow(Value x, Value y) {
    return new AMD64MathPowOp().emitLIRWrapper(getLIRGen(), x, y);
}

/**
 * Emits a zero-memory operation for {@code length} bytes at {@code address}.
 * The length is forced into RCX first — presumably required by the underlying
 * AMD64ZeroMemoryOp encoding; confirm against that op's definition.
 */
@Override
public void emitZeroMemory(Value address, Value length) {
    RegisterValue lengthReg = moveToReg(AMD64.rcx, length);
    getLIRGen().append(new AMD64ZeroMemoryOp(getAMD64LIRGen().asAddressValue(address), lengthReg));
}

/** Convenience accessor: the LIR generator downcast to its AMD64 subtype. */
protected AMD64LIRGenerator getAMD64LIRGen() {
    return (AMD64LIRGenerator) getLIRGen();
}

/**
 * Emits a load of {@code kind} from {@code address}. Sub-word integer loads
 * sign-extend into a DWORD (MOVSXB/MOVSX); floats use MOVSS/MOVSD. {@code state}
 * attaches deoptimization info to the memory access.
 */
@Override
public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) {
    AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address);
    Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind));
    switch ((AMD64Kind) kind.getPlatformKind()) {
        case BYTE:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state));
            break;
        case WORD:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state));
            break;
        case DWORD:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state));
            break;
        case QWORD:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state));
            break;
        case SINGLE:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
    return result;
}

/**
 * Stores the constant {@code value} of {@code kind} to {@code address}, preferring a
 * store-immediate when the constant fits: null stores an immediate 0, 32-bit
 * VM constants emit a patchable move, primitives emit MOV/MOVB with the raw bits.
 * Anything else falls back to load-constant-then-store (see end of method).
 */
protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) {
    Constant c = value.getConstant();
    if (JavaConstant.isNull(c)) {
        assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD;
        OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD;
        // Null stores as an immediate 0 of the pointer width.
        getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state));
        return;
    } else if (c instanceof VMConstant) {
        // only 32-bit constants can be patched
        if (kind == AMD64Kind.DWORD) {
            if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) {
                // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant
                assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object;
                getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state));
                return;
            }
        }
    } else {
        JavaConstant jc = (JavaConstant) c;
        assert jc.getJavaKind().isPrimitive();

        AMD64MIOp op = AMD64MIOp.MOV;
        OperandSize size;
        long imm;

        switch (kind) {
            case BYTE:
                // Byte stores need the dedicated MOVB immediate form.
                op = AMD64MIOp.MOVB;
                size = BYTE;
                imm = jc.asInt();
                break;
            case WORD:
                size = WORD;
                imm = jc.asInt();
                break;
            case DWORD:
                size = DWORD;
                imm = jc.asInt();
                break;
            case QWORD:
                size = QWORD;
                imm = jc.asLong();
                break;
            case SINGLE:
                // Floats are stored as their raw bit pattern via an integer move.
                size = DWORD;
                imm = Float.floatToRawIntBits(jc.asFloat());
                break;
            case DOUBLE:
                size = QWORD;
                imm = Double.doubleToRawLongBits(jc.asDouble());
                break;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind " + kind);
        }

        // Store-immediate encodings carry at most a sign-extended 32-bit value.
        if (NumUtil.isInt(imm)) {
            getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state));
            return;
        }
    }

    // fallback: load, then store
    emitStore(kind, address, asAllocatable(value), state);
}

/**
 * Stores the register/stack {@code value} of {@code kind} to {@code address} with
 * the appropriately sized MOV variant; {@code state} attaches deopt info.
 */
protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) {
    switch (kind) {
        case BYTE:
            getLIRGen().append(new
AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state));
            break;
        case WORD:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state));
            break;
        case DWORD:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state));
            break;
        case QWORD:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state));
            break;
        case SINGLE:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
}

/**
 * Public store entry point: dispatches to the constant-store path when the input is
 * a constant, otherwise to the register-store path.
 */
@Override
public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) {
    AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address);
    AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind();
    if (isConstantValue(input)) {
        emitStoreConst(kind, storeAddress, asConstantValue(input), state);
    } else {
        emitStore(kind, storeAddress, asAllocatable(input), state);
    }
}

/**
 * True when a comparison against the given null constant must use the dedicated
 * null register instead of an immediate (uncompressed null pointers only).
 */
private boolean mustReplaceNullWithNullRegister(Constant nullConstant) {
    /* Uncompressed null pointers only */
    return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant);
}

/**
 * Emits a compare of {@code left} against {@code right} for {@code cmpKind},
 * setting flags. Floats use UCOMIS; integer compares pick the operand size first,
 * then try immediate/TEST encodings for constants (continued below) before falling
 * back to a register-register CMP.
 */
@Override
public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
    OperandSize size;
    switch (cmpKind) {
        case BYTE:
            size = BYTE;
            break;
        case WORD:
            size = WORD;
            break;
        case DWORD:
            size = DWORD;
            break;
        case QWORD:
            size = QWORD;
            break;
        case SINGLE:
            getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, asAllocatable(right)));
            return;
        case
DOUBLE:
            getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, asAllocatable(right)));
            return;
        default:
            throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
    }

    if (isConstantValue(right)) {
        Constant c = LIRValueUtil.asConstant(right);
        if (JavaConstant.isNull(c)) {
            if (mustReplaceNullWithNullRegister(c)) {
                // Uncompressed null must be compared against the dedicated null register.
                getLIRGen().append(new AMD64BinaryConsumer.Op(AMD64RMOp.CMP, size, left, nullRegisterValue));
            } else {
                // Comparing against null/0: TEST reg,reg sets the same flags and is shorter.
                getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left));
            }
            return;
        } else if (c instanceof VMConstant) {
            VMConstant vc = (VMConstant) c;
            if (size == DWORD && !GeneratePIC.getValue(getOptions())) {
                // 32-bit VM constants can be emitted as a patchable immediate.
                getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc));
            } else {
                getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc));
            }
            return;
        } else if (c instanceof JavaConstant) {
            JavaConstant jc = (JavaConstant) c;
            if (jc.isDefaultForKind()) {
                AMD64RMOp op = size == BYTE ?
TESTB : TEST;
                // Default-for-kind (zero) compares as TEST reg,reg.
                getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left));
                return;
            } else if (NumUtil.is32bit(jc.asLong())) {
                // Constant fits a sign-extended 32-bit immediate: CMP reg,imm.
                getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong()));
                return;
            }
        }
    }

    // fallback: load, then compare
    getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right)));
}

/**
 * Emits a scalar rounding (ROUNDSS/ROUNDSD) of the XMM {@code value}; the rounding
 * direction is supplied via {@code mode.encoding} as the instruction's immediate.
 */
@Override
public Value emitRound(Value value, RoundingMode mode) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));
    assert ((AMD64Kind) value.getPlatformKind()).isXMM();
    if (value.getPlatformKind() == AMD64Kind.SINGLE) {
        getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding));
    } else {
        getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
    }
    return result;
}

/** True when the target CPU reports the AVX feature. */
private boolean supportAVX() {
    TargetDescription target = getLIRGen().target();
    return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
}

/**
 * Register size to use for an AVX encoding of {@code a}: the natural AVX size for
 * XMM kinds, otherwise XMM.
 */
private static AVXSize getRegisterSize(Value a) {
    AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
    if (kind.isXMM()) {
        return AVXKind.getRegisterSize(kind);
    } else {
        return AVXSize.XMM;
    }
}

/**
 * Emits the VEX-encoded binary {@code op} on {@code a} and {@code b} into a fresh
 * variable of {@code resultKind}.
 */
private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) {
    Variable result = getLIRGen().newVariable(resultKind);
    getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b)));
    return result;
}

}