1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 25 26 package org.graalvm.compiler.core.amd64; 27 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 51 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD; 57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS; 58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD; 59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS; 60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD; 61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS; 62 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD; 63 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS; 64 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD; 65 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS; 66 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD; 67 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS; 68 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE; 69 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; 70 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD; 71 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS; 72 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD; 73 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD; 74 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS; 75 import static 
org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD; 76 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 77 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 78 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 79 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 80 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 81 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 82 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 83 84 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 85 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 89 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 90 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 91 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 92 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp; 93 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp; 94 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp; 95 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize; 96 import org.graalvm.compiler.asm.amd64.AVXKind; 97 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; 98 import org.graalvm.compiler.core.common.LIRKind; 99 import org.graalvm.compiler.core.common.NumUtil; 100 import org.graalvm.compiler.core.common.calc.FloatConvert; 101 import org.graalvm.compiler.debug.GraalError; 102 import org.graalvm.compiler.lir.ConstantValue; 103 import org.graalvm.compiler.lir.LIRFrameState; 104 import org.graalvm.compiler.lir.LIRValueUtil; 105 import org.graalvm.compiler.lir.Variable; 106 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 
107 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 108 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 109 import org.graalvm.compiler.lir.amd64.AMD64Binary; 110 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 111 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 112 import org.graalvm.compiler.lir.amd64.AMD64MathCosOp; 113 import org.graalvm.compiler.lir.amd64.AMD64MathExpOp; 114 import org.graalvm.compiler.lir.amd64.AMD64MathLog10Op; 115 import org.graalvm.compiler.lir.amd64.AMD64MathLogOp; 116 import org.graalvm.compiler.lir.amd64.AMD64MathPowOp; 117 import org.graalvm.compiler.lir.amd64.AMD64MathSinOp; 118 import org.graalvm.compiler.lir.amd64.AMD64MathTanOp; 119 import org.graalvm.compiler.lir.amd64.AMD64Move; 120 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp; 121 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp; 122 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp; 123 import org.graalvm.compiler.lir.amd64.AMD64Unary; 124 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary; 125 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp; 126 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary; 127 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator; 128 129 import jdk.vm.ci.amd64.AMD64; 130 import jdk.vm.ci.amd64.AMD64.CPUFeature; 131 import jdk.vm.ci.amd64.AMD64Kind; 132 import jdk.vm.ci.code.CodeUtil; 133 import jdk.vm.ci.code.Register; 134 import jdk.vm.ci.code.RegisterValue; 135 import jdk.vm.ci.code.TargetDescription; 136 import jdk.vm.ci.meta.AllocatableValue; 137 import jdk.vm.ci.meta.Constant; 138 import jdk.vm.ci.meta.JavaConstant; 139 import jdk.vm.ci.meta.JavaKind; 140 import jdk.vm.ci.meta.PlatformKind; 141 import jdk.vm.ci.meta.VMConstant; 142 import jdk.vm.ci.meta.Value; 143 import jdk.vm.ci.meta.ValueKind; 144 145 /** 146 * This class implements the AMD64 specific portion of the LIR generator. 
147 */ 148 public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool { 149 150 private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD)); 151 152 public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue) { 153 this.nullRegisterValue = nullRegisterValue; 154 } 155 156 private final AllocatableValue nullRegisterValue; 157 158 @Override 159 public Variable emitNegate(Value inputVal) { 160 AllocatableValue input = asAllocatable(inputVal); 161 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 162 boolean isAvx = supportAVX(); 163 switch ((AMD64Kind) input.getPlatformKind()) { 164 case DWORD: 165 getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input)); 166 break; 167 case QWORD: 168 getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input)); 169 break; 170 case SINGLE: 171 JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)); 172 if (isAvx) { 173 getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask)))); 174 } else { 175 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16)); 176 } 177 break; 178 case DOUBLE: 179 JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)); 180 if (isAvx) { 181 getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask)))); 182 } else { 183 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16)); 184 } 185 break; 186 default: 187 throw GraalError.shouldNotReachHere(input.getPlatformKind().toString()); 188 } 189 return result; 190 } 191 192 @Override 193 public Variable emitNot(Value inputVal) { 194 AllocatableValue input = asAllocatable(inputVal); 195 Variable result = 
getLIRGen().newVariable(LIRKind.combine(input)); 196 switch ((AMD64Kind) input.getPlatformKind()) { 197 case DWORD: 198 getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input)); 199 break; 200 case QWORD: 201 getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input)); 202 break; 203 default: 204 throw GraalError.shouldNotReachHere(); 205 } 206 return result; 207 } 208 209 private Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) { 210 if (isJavaConstant(b)) { 211 return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(a), asConstantValue(b), setFlags); 212 } else if (commutative && isJavaConstant(a)) { 213 return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(b), asConstantValue(a), setFlags); 214 } else { 215 return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b)); 216 } 217 } 218 219 private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) { 220 if (isJavaConstant(b)) { 221 return emitBinaryConst(resultKind, op, size, asAllocatable(a), asJavaConstant(b)); 222 } else if (commutative && isJavaConstant(a)) { 223 return emitBinaryConst(resultKind, op, size, asAllocatable(b), asJavaConstant(a)); 224 } else { 225 return emitBinaryVar(resultKind, op, size, commutative, asAllocatable(a), asAllocatable(b)); 226 } 227 } 228 229 private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) { 230 long value = b.getJavaConstant().asLong(); 231 if (NumUtil.isInt(value)) { 232 Variable result = getLIRGen().newVariable(resultKind); 233 int constant = (int) value; 234 235 if (!setFlags) { 236 AMD64MOp mop = getMOp(op, constant); 237 if (mop != null) { 238 getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a)); 
239 return result; 240 } 241 } 242 243 getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant)); 244 return result; 245 } else { 246 return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, asAllocatable(b)); 247 } 248 } 249 250 private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) { 251 if (constant == 1) { 252 if (op.equals(AMD64BinaryArithmetic.ADD)) { 253 return AMD64MOp.INC; 254 } 255 if (op.equals(AMD64BinaryArithmetic.SUB)) { 256 return AMD64MOp.DEC; 257 } 258 } else if (constant == -1) { 259 if (op.equals(AMD64BinaryArithmetic.ADD)) { 260 return AMD64MOp.DEC; 261 } 262 if (op.equals(AMD64BinaryArithmetic.SUB)) { 263 return AMD64MOp.INC; 264 } 265 } 266 return null; 267 } 268 269 private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) { 270 Variable result = getLIRGen().newVariable(resultKind); 271 getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b)); 272 return result; 273 } 274 275 private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) { 276 Variable result = getLIRGen().newVariable(resultKind); 277 if (commutative) { 278 getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b)); 279 } else { 280 getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b)); 281 } 282 return result; 283 } 284 285 @Override 286 protected boolean isNumericInteger(PlatformKind kind) { 287 return ((AMD64Kind) kind).isInteger(); 288 } 289 290 private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) { 291 Variable result = getLIRGen().newVariable(resultKind); 292 AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset); 293 getLIRGen().append(new AMD64Move.LeaOp(result, address, size)); 294 return result; 295 } 296 297 @Override 298 public Variable 
emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) { 299 boolean isAvx = supportAVX(); 300 switch ((AMD64Kind) a.getPlatformKind()) { 301 case DWORD: 302 if (isJavaConstant(b) && !setFlags) { 303 long displacement = asJavaConstant(b).asLong(); 304 if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) { 305 return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD); 306 } 307 } 308 return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags); 309 case QWORD: 310 if (isJavaConstant(b) && !setFlags) { 311 long displacement = asJavaConstant(b).asLong(); 312 if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) { 313 return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD); 314 } 315 } 316 return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags); 317 case SINGLE: 318 if (isAvx) { 319 return emitBinary(resultKind, VADDSS, a, b); 320 } else { 321 return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b); 322 } 323 case DOUBLE: 324 if (isAvx) { 325 return emitBinary(resultKind, VADDSD, a, b); 326 } else { 327 return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b); 328 } 329 default: 330 throw GraalError.shouldNotReachHere(); 331 } 332 } 333 334 @Override 335 public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) { 336 boolean isAvx = supportAVX(); 337 switch ((AMD64Kind) a.getPlatformKind()) { 338 case DWORD: 339 return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags); 340 case QWORD: 341 return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags); 342 case SINGLE: 343 if (isAvx) { 344 return emitBinary(resultKind, VSUBSS, a, b); 345 } else { 346 return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b); 347 } 348 case DOUBLE: 349 if (isAvx) { 350 return emitBinary(resultKind, VSUBSD, a, b); 351 } else { 352 return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b); 353 } 354 default: 355 throw GraalError.shouldNotReachHere(); 
356 } 357 } 358 359 private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) { 360 long value = b.getJavaConstant().asLong(); 361 if (NumUtil.isInt(value)) { 362 int imm = (int) value; 363 AMD64RMIOp op; 364 if (NumUtil.isByte(imm)) { 365 op = AMD64RMIOp.IMUL_SX; 366 } else { 367 op = AMD64RMIOp.IMUL; 368 } 369 370 Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b)); 371 getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm)); 372 return ret; 373 } else { 374 return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b)); 375 } 376 } 377 378 private Variable emitIMUL(OperandSize size, Value a, Value b) { 379 if (isJavaConstant(b)) { 380 return emitIMULConst(size, asAllocatable(a), asConstantValue(b)); 381 } else if (isJavaConstant(a)) { 382 return emitIMULConst(size, asAllocatable(b), asConstantValue(a)); 383 } else { 384 return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b)); 385 } 386 } 387 388 @Override 389 public Variable emitMul(Value a, Value b, boolean setFlags) { 390 boolean isAvx = supportAVX(); 391 LIRKind resultKind = LIRKind.combine(a, b); 392 switch ((AMD64Kind) a.getPlatformKind()) { 393 case DWORD: 394 return emitIMUL(DWORD, a, b); 395 case QWORD: 396 return emitIMUL(QWORD, a, b); 397 case SINGLE: 398 if (isAvx) { 399 return emitBinary(resultKind, VMULSS, a, b); 400 } else { 401 return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b); 402 } 403 case DOUBLE: 404 if (isAvx) { 405 return emitBinary(resultKind, VMULSD, a, b); 406 } else { 407 return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b); 408 } 409 default: 410 throw GraalError.shouldNotReachHere(); 411 } 412 } 413 414 private RegisterValue moveToReg(Register reg, Value v) { 415 RegisterValue ret = reg.asValue(v.getValueKind()); 416 getLIRGen().emitMove(ret, v); 417 return ret; 418 } 419 420 private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, 
Value b) { 421 AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b))); 422 return getLIRGen().emitMove(mulHigh.getHighResult()); 423 } 424 425 @Override 426 public Value emitMulHigh(Value a, Value b) { 427 switch ((AMD64Kind) a.getPlatformKind()) { 428 case DWORD: 429 return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b); 430 case QWORD: 431 return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b); 432 default: 433 throw GraalError.shouldNotReachHere(); 434 } 435 } 436 437 @Override 438 public Value emitUMulHigh(Value a, Value b) { 439 switch ((AMD64Kind) a.getPlatformKind()) { 440 case DWORD: 441 return emitMulHigh(AMD64MOp.MUL, DWORD, a, b); 442 case QWORD: 443 return emitMulHigh(AMD64MOp.MUL, QWORD, a, b); 444 default: 445 throw GraalError.shouldNotReachHere(); 446 } 447 } 448 449 public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) { 450 Variable result = getLIRGen().newVariable(LIRKind.combine(a)); 451 getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state)); 452 return result; 453 } 454 455 protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) { 456 Variable result = getLIRGen().newVariable(LIRKind.value(kind)); 457 getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state)); 458 return result; 459 } 460 461 protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) { 462 // Issue a zero extending load of the proper bit size and set the result to 463 // the proper kind. 464 Variable result = getLIRGen().newVariable(LIRKind.value(resultBits <= 32 ? 
AMD64Kind.DWORD : AMD64Kind.QWORD)); 465 switch (memoryKind) { 466 case BYTE: 467 getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state)); 468 break; 469 case WORD: 470 getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state)); 471 break; 472 case DWORD: 473 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state)); 474 break; 475 case QWORD: 476 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state)); 477 break; 478 default: 479 throw GraalError.shouldNotReachHere(); 480 } 481 return result; 482 } 483 484 private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) { 485 LIRKind kind = LIRKind.combine(a, b); 486 487 AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a))); 488 return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state)); 489 } 490 491 private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) { 492 LIRKind kind = LIRKind.combine(a, b); 493 494 RegisterValue rax = moveToReg(AMD64.rax, a); 495 RegisterValue rdx = AMD64.rdx.asValue(kind); 496 getLIRGen().append(new AMD64ClearRegisterOp(size, rdx)); 497 return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state)); 498 } 499 500 public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) { 501 AMD64MulDivOp op; 502 switch ((AMD64Kind) a.getPlatformKind()) { 503 case DWORD: 504 op = emitIDIV(DWORD, a, b, state); 505 break; 506 case QWORD: 507 op = emitIDIV(QWORD, a, b, state); 508 break; 509 default: 510 throw GraalError.shouldNotReachHere(); 511 } 512 return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())}; 513 } 514 515 public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) { 516 AMD64MulDivOp op; 517 switch 
((AMD64Kind) a.getPlatformKind()) { 518 case DWORD: 519 op = emitDIV(DWORD, a, b, state); 520 break; 521 case QWORD: 522 op = emitDIV(QWORD, a, b, state); 523 break; 524 default: 525 throw GraalError.shouldNotReachHere(); 526 } 527 return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())}; 528 } 529 530 @Override 531 public Value emitDiv(Value a, Value b, LIRFrameState state) { 532 boolean isAvx = supportAVX(); 533 LIRKind resultKind = LIRKind.combine(a, b); 534 switch ((AMD64Kind) a.getPlatformKind()) { 535 case DWORD: 536 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state); 537 return getLIRGen().emitMove(op.getQuotient()); 538 case QWORD: 539 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state); 540 return getLIRGen().emitMove(lop.getQuotient()); 541 case SINGLE: 542 if (isAvx) { 543 return emitBinary(resultKind, VDIVSS, a, b); 544 } else { 545 return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b); 546 } 547 case DOUBLE: 548 if (isAvx) { 549 return emitBinary(resultKind, VDIVSD, a, b); 550 } else { 551 return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b); 552 } 553 default: 554 throw GraalError.shouldNotReachHere(); 555 } 556 } 557 558 @Override 559 public Value emitRem(Value a, Value b, LIRFrameState state) { 560 switch ((AMD64Kind) a.getPlatformKind()) { 561 case DWORD: 562 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state); 563 return getLIRGen().emitMove(op.getRemainder()); 564 case QWORD: 565 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state); 566 return getLIRGen().emitMove(lop.getRemainder()); 567 case SINGLE: { 568 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b)); 569 getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b))); 570 return result; 571 } 572 case DOUBLE: { 573 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b)); 574 getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b))); 575 return result; 576 } 577 default: 
578 throw GraalError.shouldNotReachHere(); 579 } 580 } 581 582 @Override 583 public Variable emitUDiv(Value a, Value b, LIRFrameState state) { 584 AMD64MulDivOp op; 585 switch ((AMD64Kind) a.getPlatformKind()) { 586 case DWORD: 587 op = emitDIV(DWORD, a, b, state); 588 break; 589 case QWORD: 590 op = emitDIV(QWORD, a, b, state); 591 break; 592 default: 593 throw GraalError.shouldNotReachHere(); 594 } 595 return getLIRGen().emitMove(op.getQuotient()); 596 } 597 598 @Override 599 public Variable emitURem(Value a, Value b, LIRFrameState state) { 600 AMD64MulDivOp op; 601 switch ((AMD64Kind) a.getPlatformKind()) { 602 case DWORD: 603 op = emitDIV(DWORD, a, b, state); 604 break; 605 case QWORD: 606 op = emitDIV(QWORD, a, b, state); 607 break; 608 default: 609 throw GraalError.shouldNotReachHere(); 610 } 611 return getLIRGen().emitMove(op.getRemainder()); 612 } 613 614 @Override 615 public Variable emitAnd(Value a, Value b) { 616 LIRKind resultKind = LIRKind.combine(a, b); 617 switch ((AMD64Kind) a.getPlatformKind()) { 618 case DWORD: 619 return emitBinary(resultKind, AND, DWORD, true, a, b, false); 620 case QWORD: 621 return emitBinary(resultKind, AND, QWORD, true, a, b, false); 622 case SINGLE: 623 return emitBinary(resultKind, SSEOp.AND, PS, true, a, b); 624 case DOUBLE: 625 return emitBinary(resultKind, SSEOp.AND, PD, true, a, b); 626 default: 627 throw GraalError.shouldNotReachHere(); 628 } 629 } 630 631 @Override 632 public Variable emitOr(Value a, Value b) { 633 boolean isAvx = supportAVX(); 634 LIRKind resultKind = LIRKind.combine(a, b); 635 switch ((AMD64Kind) a.getPlatformKind()) { 636 case DWORD: 637 return emitBinary(resultKind, OR, DWORD, true, a, b, false); 638 case QWORD: 639 return emitBinary(resultKind, OR, QWORD, true, a, b, false); 640 case SINGLE: 641 if (isAvx) { 642 return emitBinary(resultKind, VORPS, a, b); 643 } else { 644 return emitBinary(resultKind, SSEOp.OR, PS, true, a, b); 645 } 646 case DOUBLE: 647 if (isAvx) { 648 return 
emitBinary(resultKind, VORPD, a, b); 649 } else { 650 return emitBinary(resultKind, SSEOp.OR, PD, true, a, b); 651 } 652 default: 653 throw GraalError.shouldNotReachHere(); 654 } 655 } 656 657 @Override 658 public Variable emitXor(Value a, Value b) { 659 boolean isAvx = supportAVX(); 660 LIRKind resultKind = LIRKind.combine(a, b); 661 switch ((AMD64Kind) a.getPlatformKind()) { 662 case DWORD: 663 return emitBinary(resultKind, XOR, DWORD, true, a, b, false); 664 case QWORD: 665 return emitBinary(resultKind, XOR, QWORD, true, a, b, false); 666 case SINGLE: 667 if (isAvx) { 668 return emitBinary(resultKind, VXORPS, a, b); 669 } else { 670 return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b); 671 } 672 case DOUBLE: 673 if (isAvx) { 674 return emitBinary(resultKind, VXORPD, a, b); 675 } else { 676 return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b); 677 } 678 default: 679 throw GraalError.shouldNotReachHere(); 680 } 681 } 682 683 private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) { 684 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind())); 685 AllocatableValue input = asAllocatable(a); 686 if (isJavaConstant(b)) { 687 JavaConstant c = asJavaConstant(b); 688 if (c.asLong() == 1) { 689 getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input)); 690 } else { 691 /* 692 * c is implicitly masked to 5 or 6 bits by the CPU, so casting it to (int) is 693 * always correct, even without the NumUtil.is32bit() test. 
694 */ 695 getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (int) c.asLong())); 696 } 697 } else { 698 getLIRGen().emitMove(RCX_I, b); 699 getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I)); 700 } 701 return result; 702 } 703 704 @Override 705 public Variable emitShl(Value a, Value b) { 706 switch ((AMD64Kind) a.getPlatformKind()) { 707 case DWORD: 708 return emitShift(SHL, DWORD, a, b); 709 case QWORD: 710 return emitShift(SHL, QWORD, a, b); 711 default: 712 throw GraalError.shouldNotReachHere(); 713 } 714 } 715 716 @Override 717 public Variable emitShr(Value a, Value b) { 718 switch ((AMD64Kind) a.getPlatformKind()) { 719 case DWORD: 720 return emitShift(SAR, DWORD, a, b); 721 case QWORD: 722 return emitShift(SAR, QWORD, a, b); 723 default: 724 throw GraalError.shouldNotReachHere(); 725 } 726 } 727 728 @Override 729 public Variable emitUShr(Value a, Value b) { 730 switch ((AMD64Kind) a.getPlatformKind()) { 731 case DWORD: 732 return emitShift(SHR, DWORD, a, b); 733 case QWORD: 734 return emitShift(SHR, QWORD, a, b); 735 default: 736 throw GraalError.shouldNotReachHere(); 737 } 738 } 739 740 public Variable emitRol(Value a, Value b) { 741 switch ((AMD64Kind) a.getPlatformKind()) { 742 case DWORD: 743 return emitShift(ROL, DWORD, a, b); 744 case QWORD: 745 return emitShift(ROL, QWORD, a, b); 746 default: 747 throw GraalError.shouldNotReachHere(); 748 } 749 } 750 751 @Override 752 public Variable emitRor(Value a, Value b) { 753 switch ((AMD64Kind) a.getPlatformKind()) { 754 case DWORD: 755 return emitShift(ROR, DWORD, a, b); 756 case QWORD: 757 return emitShift(ROR, QWORD, a, b); 758 default: 759 throw GraalError.shouldNotReachHere(); 760 } 761 } 762 763 private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) { 764 Variable result = getLIRGen().newVariable(kind); 765 getLIRGen().append(new AMD64Unary.RMOp(op, size, result, asAllocatable(input))); 766 return result; 767 } 768 769 
    /**
     * Emits a unary memory/register (MR-encoded) conversion {@code op} of the given operand
     * size, producing a fresh variable of {@code kind}.
     */
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.MROp(op, size, result, asAllocatable(input)));
        return result;
    }

    /**
     * Reinterprets the bits of {@code inputVal} as the platform kind of {@code to} without any
     * value conversion. Only same-width integer <-> floating point transfers are supported
     * (DWORD <-> SINGLE, QWORD <-> DOUBLE); any other combination is a compiler error.
     */
    @Override
    public Value emitReinterpret(LIRKind to, Value inputVal) {
        ValueKind<?> from = inputVal.getValueKind();
        if (to.equals(from)) {
            // Same kind: nothing to do.
            return inputVal;
        }

        AllocatableValue input = asAllocatable(inputVal);
        /*
         * Conversions between integer to floating point types require moves between CPU and FPU
         * registers.
         */
        AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind();
        switch ((AMD64Kind) to.getPlatformKind()) {
            case DWORD:
                switch (fromKind) {
                    case SINGLE:
                        // XMM -> general-purpose register: MOVD (MR form).
                        return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input);
                }
                break;
            case QWORD:
                switch (fromKind) {
                    case DOUBLE:
                        return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input);
                }
                break;
            case SINGLE:
                switch (fromKind) {
                    case DWORD:
                        // General-purpose -> XMM register: MOVD (RM form).
                        return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input);
                }
                break;
            case DOUBLE:
                switch (fromKind) {
                    case QWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input);
                }
                break;
        }
        throw GraalError.shouldNotReachHere();
    }

    /**
     * Emits a floating point conversion using the SSE scalar convert instructions.
     * Float-to-integer conversions use the truncating forms (CVTTSD2SI / CVTTSS2SI).
     */
    @Override
    public Value emitFloatConvert(FloatConvert op, Value input) {
        switch (op) {
            case D2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input);
            case D2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input);
            case D2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input);
            case F2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input);
            case F2I:
                return
                       emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSS2SI, DWORD, input);
            case F2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
            case I2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
            case I2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
            case L2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
            case L2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Narrows {@code inputVal} to {@code bits}. Only a QWORD narrowed to 32 bits or fewer
     * needs an actual move (to a DWORD variable); all other narrows are no-ops because
     * sub-DWORD values are already represented as DWORD.
     */
    @Override
    public Value emitNarrow(Value inputVal, int bits) {
        if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
            // TODO make it possible to reinterpret Long as Int in LIR without move
            return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
        } else {
            return inputVal;
        }
    }

    /**
     * Sign-extends {@code inputVal} from {@code fromBits} to {@code toBits} using
     * MOVSX/MOVSXB/MOVSXD. Only 8/16/32-bit sources are supported; other widths throw.
     */
    @Override
    public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (toBits > 32) {
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
                case 32:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
                case 32:
                    // 32 -> 32: already in canonical DWORD representation.
                    return inputVal;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
    }

    /**
     * Zero-extends {@code inputVal} from {@code fromBits} to {@code toBits}. Byte-multiple
     * source widths use MOVZX/MOVZXB/MOV; odd bit counts fall back to an explicit AND mask.
     */
    @Override
    public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (fromBits > 32) {
            // Source wider than 32 bits: mask the QWORD in place; no MOVZX form exists.
            assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
            Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
            long mask = CodeUtil.mask(fromBits);
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
            return result;
        } else {
            LIRKind resultKind = LIRKind.combine(inputVal);
            if (toBits > 32) {
                resultKind = resultKind.changeType(AMD64Kind.QWORD);
            } else {
                resultKind = resultKind.changeType(AMD64Kind.DWORD);
            }

            /*
             * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
             * operations implicitly set the upper half of the register to 0, which is what we want
             * anyway. Compared to the QWORD operations, the encoding of the DWORD operations is
             * sometimes one byte shorter.
             */
            switch (fromBits) {
                case 8:
                    return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(resultKind, MOVZX, DWORD, inputVal);
                case 32:
                    return emitConvertOp(resultKind, MOV, DWORD, inputVal);
            }

            // odd bit count, fall back on manual masking
            Variable result = getLIRGen().newVariable(resultKind);
            JavaConstant mask;
            if (toBits > 32) {
                mask = JavaConstant.forLong(CodeUtil.mask(fromBits));
            } else {
                mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits));
            }
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), mask));
            return result;
        }
    }

    /**
     * Counts the set bits of an integer value using POPCNT; the result is always a DWORD.
     */
    @Override
    public Variable emitBitCount(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Finds the index of the lowest set bit using BSF.
     *
     * NOTE(review): unlike the sibling emitBitScanReverse, this always emits the QWORD form
     * regardless of the input's platform kind — presumably relying on the upper register half
     * being well-defined for narrower inputs; confirm against callers before changing.
     */
    @Override
    public Variable emitBitScanForward(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, asAllocatable(value)));
        return result;
    }

    /**
     * Finds the index of the highest set bit using BSR, sized to the operand's kind.
     */
    @Override
    public Variable emitBitScanReverse(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Counts leading zero bits using LZCNT, sized to the operand's kind; result is a DWORD.
     */
    @Override
    public Value emitCountLeadingZeros(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Counts trailing zero bits using TZCNT, sized to the operand's kind; result is a DWORD.
     */
    @Override
    public Value emitCountTrailingZeros(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits an and-not using the BMI1 ANDN instruction (computes {@code ~src1 & src2},
     * with {@code value1} in the first source position).
     */
    @Override
    public Value emitLogicalAndNot(Value value1, Value value2) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value1, value2));

        if (value1.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.QWORD, result, asAllocatable(value1), asAllocatable(value2)));
        } else {
            getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.DWORD, result, asAllocatable(value1), asAllocatable(value2)));
        }
        return result;
    }

    /**
     * Isolates the lowest set bit (equivalent to {@code value & -value}) using BMI1 BLSI.
     */
    @Override
    public Value emitLowestSetIsolatedBit(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));

        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new
                            AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.DWORD, result, asAllocatable(value)));
        }

        return result;
    }

    /**
     * Produces a mask of all bits up to and including the lowest set bit (equivalent to
     * {@code value ^ (value - 1)}) using BMI1 BLSMSK.
     */
    @Override
    public Value emitGetMaskUpToLowestSetBit(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));

        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.DWORD, result, asAllocatable(value)));
        }

        return result;
    }

    /**
     * Clears the lowest set bit (equivalent to {@code value & (value - 1)}) using BMI1 BLSR.
     */
    @Override
    public Value emitResetLowestSetBit(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));

        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.QWORD, result, asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.DWORD, result, asAllocatable(value)));
        }

        return result;
    }

    /**
     * Computes the absolute value of a float/double by ANDing away the sign bit with a
     * constant mask (0x7FFFFFFF / 0x7FFFFFFFFFFFFFFFL).
     */
    @Override
    public Value emitMathAbs(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case SINGLE:
                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Computes the square root of a float/double using the scalar SSE SQRT instruction.
     */
    @Override
    public Value emitMathSqrt(Value input) {
        Variable result =
                          getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case SINGLE:
                getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, asAllocatable(input)));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, asAllocatable(input)));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits a logarithm via the stub-based math op: natural log, or log10 when
     * {@code base10} is set.
     */
    @Override
    public Value emitMathLog(Value input, boolean base10) {
        if (base10) {
            return new AMD64MathLog10Op().emitLIRWrapper(getLIRGen(), input);
        } else {
            return new AMD64MathLogOp().emitLIRWrapper(getLIRGen(), input);
        }
    }

    /** Emits cosine via the stub-based math op. */
    @Override
    public Value emitMathCos(Value input) {
        return new AMD64MathCosOp().emitLIRWrapper(getLIRGen(), input);
    }

    /** Emits sine via the stub-based math op. */
    @Override
    public Value emitMathSin(Value input) {
        return new AMD64MathSinOp().emitLIRWrapper(getLIRGen(), input);
    }

    /** Emits tangent via the stub-based math op. */
    @Override
    public Value emitMathTan(Value input) {
        return new AMD64MathTanOp().emitLIRWrapper(getLIRGen(), input);
    }

    /** Emits exp via the stub-based math op. */
    @Override
    public Value emitMathExp(Value input) {
        return new AMD64MathExpOp().emitLIRWrapper(getLIRGen(), input);
    }

    /** Emits pow via the stub-based math op. */
    @Override
    public Value emitMathPow(Value x, Value y) {
        return new AMD64MathPowOp().emitLIRWrapper(getLIRGen(), x, y);
    }

    /** Convenience accessor for the enclosing generator, narrowed to its AMD64 type. */
    protected AMD64LIRGenerator getAMD64LIRGen() {
        return (AMD64LIRGenerator) getLIRGen();
    }

    /**
     * Emits a memory load of the given kind. Sub-DWORD integer loads sign-extend into a
     * DWORD register (MOVSXB/MOVSX); floats load via MOVSS/MOVSD.
     */
    @Override
    public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) {
        AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address);
        Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind));
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state));
                break;
            case SINGLE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Stores a constant to memory. Uses an immediate-form MOV when the constant can be
     * encoded (null, patchable 32-bit VM constants, or primitives whose bit pattern fits in
     * a 32-bit immediate); otherwise falls back to materializing the constant in a register
     * and emitting a regular store.
     */
    protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) {
        Constant c = value.getConstant();
        if (JavaConstant.isNull(c)) {
            // Null pointer: store a zero immediate of pointer width.
            assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD;
            OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD;
            getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state));
            return;
        } else if (c instanceof VMConstant) {
            // only 32-bit constants can be patched
            if (kind == AMD64Kind.DWORD) {
                if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) {
                    // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant
                    assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object;
                    getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state));
                    return;
                }
            }
        } else {
            JavaConstant jc = (JavaConstant) c;
            assert jc.getJavaKind().isPrimitive();

            AMD64MIOp op = AMD64MIOp.MOV;
            OperandSize size;
            long imm;

            switch (kind) {
                case BYTE:
                    op = AMD64MIOp.MOVB;
                    size = BYTE;
                    imm = jc.asInt();
                    break;
                case WORD:
                    // WORD uses the plain MOV opcode with a 16-bit operand size.
                    size = WORD;
                    imm = jc.asInt();
                    break;
                case DWORD:
                    size = DWORD;
                    imm = jc.asInt();
                    break;
                case QWORD:
                    size = QWORD;
                    imm = jc.asLong();
                    break;
                case SINGLE:
                    // Store the raw IEEE-754 bit pattern as a 32-bit integer immediate.
                    size = DWORD;
                    imm = Float.floatToRawIntBits(jc.asFloat());
                    break;
                case DOUBLE:
                    // Store the raw IEEE-754 bit pattern as a 64-bit integer immediate.
                    size = QWORD;
                    imm = Double.doubleToRawLongBits(jc.asDouble());
                    break;
                default:
                    throw GraalError.shouldNotReachHere("unexpected kind " + kind);
            }

            // x86 MOV-to-memory immediates are at most 32 bits (sign-extended for QWORD).
            if (NumUtil.isInt(imm)) {
                getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state));
                return;
            }
        }

        // fallback: load, then store
        emitStore(kind, address, asAllocatable(value), state);
    }

    /**
     * Stores a register value to memory with the MOV variant matching {@code kind}
     * (MOVB for bytes, MOV for integer words, MOVSS/MOVSD for floats).
     */
    protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) {
        switch (kind) {
            case BYTE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state));
                break;
            case SINGLE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Stores {@code input} to {@code address}, dispatching to the constant or register
     * store helper depending on the input's form.
     */
    @Override
    public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) {
        AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address);
        AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind();
        if (isConstantValue(input)) {
            emitStoreConst(kind, storeAddress, asConstantValue(input), state);
        } else {
            emitStore(kind, storeAddress, asAllocatable(input), state);
        }
    }

    /**
     * Whether a comparison against this constant must use the dedicated null register
     * instead of an immediate/TEST idiom.
     */
    private boolean mustReplaceNullWithNullRegister(Constant nullConstant) {
        /* Uncompressed null pointers only */
        return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant);
    }

    /**
     * Emits a flag-setting comparison of {@code left} against {@code right}. Floats use
     * UCOMIS; integer comparisons prefer TEST against zero, patchable or inlined forms for
     * VM constants, and CMP with a 32-bit immediate where the constant fits, falling back
     * to a register-register CMP.
     */
    @Override
    public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = BYTE;
                break;
            case WORD:
                size = WORD;
                break;
            case DWORD:
                size = DWORD;
                break;
            case QWORD:
                size = QWORD;
                break;
            case SINGLE:
                getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, asAllocatable(right)));
                return;
            case DOUBLE:
                getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, asAllocatable(right)));
                return;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(right)) {
            Constant c = LIRValueUtil.asConstant(right);
            if (JavaConstant.isNull(c)) {
                if (mustReplaceNullWithNullRegister(c)) {
                    getLIRGen().append(new AMD64BinaryConsumer.Op(AMD64RMOp.CMP, size, left, nullRegisterValue));
                } else {
                    // Compare against null == compare against zero: TEST left, left.
                    getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left));
                }
                return;
            } else if (c instanceof VMConstant) {
                VMConstant vc = (VMConstant) c;
                if (size == DWORD && !GeneratePIC.getValue(getOptions())) {
                    // 32-bit VM constants can be emitted as a patchable immediate.
                    getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc));
                } else {
                    getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc));
                }
                return;
            } else if (c instanceof JavaConstant) {
                JavaConstant jc = (JavaConstant) c;
                if (jc.isDefaultForKind()) {
                    // Comparing against zero: TEST is shorter than CMP (TESTB for bytes).
                    AMD64RMOp op = size == BYTE ? TESTB : TEST;
                    getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left));
                    return;
                } else if (NumUtil.is32bit(jc.asLong())) {
                    getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong()));
                    return;
                }
            }
        }

        // fallback: load, then compare
        getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right)));
    }

    /**
     * Rounds a float/double using ROUNDSS/ROUNDSD with the mode's immediate encoding.
     *
     * NOTE(review): both variants pass OperandSize.PD — presumably an encoding detail of the
     * ROUNDSx opcodes rather than an operand-size mistake; confirm before changing.
     */
    @Override
    public Value emitRound(Value value, RoundingMode mode) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));
        assert ((AMD64Kind) value.getPlatformKind()).isXMM();
        if (value.getPlatformKind() == AMD64Kind.SINGLE) {
            getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding));
        } else {
            getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
        }
        return result;
    }

    /** Whether the target CPU advertises the AVX feature. */
    private boolean supportAVX() {
        TargetDescription target = getLIRGen().target();
        return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
    }

    /**
     * Returns the AVX register size for {@code a}'s kind; non-XMM kinds default to XMM.
     */
    private static AVXSize getRegisterSize(Value a) {
        AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
        if (kind.isXMM()) {
            return AVXKind.getRegisterSize(kind);
        } else {
            return AVXSize.XMM;
        }
    }

    /**
     * Emits a two-operand VEX-encoded (RVM) instruction into a fresh variable of
     * {@code resultKind}.
     */
    private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) {
        Variable result = getLIRGen().newVariable(resultKind);
        getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b)));
        return result;
    }

}